From df8bab86ce1f971bd499e37e59f9ef213cbfbb0d Mon Sep 17 00:00:00 2001 From: Luther Monson Date: Thu, 27 Jan 2022 09:03:18 -0700 Subject: [PATCH 1/3] go mod --- go.mod | 95 +- go.sum | 839 +- .../go/compute/metadata/.repo-metadata.json | 12 - .../go/compute/metadata/metadata.go | 41 +- .../github.com/Microsoft/go-winio/CODEOWNERS | 1 + vendor/github.com/Microsoft/go-winio/file.go | 18 +- .../github.com/Microsoft/go-winio/fileinfo.go | 36 +- .../github.com/Microsoft/go-winio/hvsock.go | 307 + vendor/github.com/Microsoft/go-winio/pipe.go | 244 +- .../Microsoft/go-winio/pkg/guid/guid.go | 237 + .../pkg/security/grantvmgroupaccess.go | 161 + .../go-winio/pkg/security/syscall_windows.go | 7 + .../go-winio/pkg/security/zsyscall_windows.go | 70 + .../github.com/Microsoft/go-winio/syscall.go | 2 +- .../github.com/Microsoft/go-winio/vhd/vhd.go | 323 + .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 + .../Microsoft/go-winio/zsyscall_windows.go | 551 +- .../Microsoft/hcsshim/.gitattributes | 1 + .../github.com/Microsoft/hcsshim/.gitignore | 37 + .../Microsoft/hcsshim/.golangci.yml | 99 + .../Microsoft/hcsshim/.gometalinter.json | 17 - .../github.com/Microsoft/hcsshim/CODEOWNERS | 1 + vendor/github.com/Microsoft/hcsshim/Makefile | 87 + .../Microsoft/hcsshim/Protobuild.toml | 49 + vendor/github.com/Microsoft/hcsshim/README.md | 87 +- .../github.com/Microsoft/hcsshim/appveyor.yml | 29 - .../hcsshim/computestorage/attach.go | 38 + .../hcsshim/computestorage/destroy.go | 26 + .../hcsshim/computestorage/detach.go | 26 + .../hcsshim/computestorage/export.go | 46 + .../hcsshim/computestorage/format.go | 26 + .../hcsshim/computestorage/helpers.go | 193 + .../hcsshim/computestorage/import.go | 41 + .../hcsshim/computestorage/initialize.go | 38 + .../Microsoft/hcsshim/computestorage/mount.go | 27 + .../Microsoft/hcsshim/computestorage/setup.go | 74 + .../hcsshim/computestorage/storage.go | 50 + .../computestorage/zsyscall_windows.go | 319 + .../github.com/Microsoft/hcsshim/container.go | 77 +- vendor/github.com/Microsoft/hcsshim/errors.go | 44 +- .../github.com/Microsoft/hcsshim/hcn/hcn.go | 195 +- .../Microsoft/hcsshim/hcn/hcnendpoint.go | 34 +- .../Microsoft/hcsshim/hcn/hcnerrors.go | 89 +- .../Microsoft/hcsshim/hcn/hcnglobals.go | 65 +- .../Microsoft/hcsshim/hcn/hcnloadbalancer.go | 68 +- .../Microsoft/hcsshim/hcn/hcnnamespace.go | 32 +- .../Microsoft/hcsshim/hcn/hcnnetwork.go | 60 +- .../Microsoft/hcsshim/hcn/hcnpolicy.go | 159 +- .../Microsoft/hcsshim/hcn/hcnroute.go | 266 + .../Microsoft/hcsshim/hcn/hcnsupport.go | 106 +- .../Microsoft/hcsshim/hcn/zsyscall_windows.go | 113 +- .../Microsoft/hcsshim/hnsendpoint.go | 24 + .../github.com/Microsoft/hcsshim/hnspolicy.go | 3 + .../github.com/Microsoft/hcsshim/interface.go | 2 +- .../hcsshim/internal/cni/registry.go | 2 +- .../Microsoft/hcsshim/internal/cow/cow.go | 91 + .../hcsshim/internal/guestrequest/types.go | 100 - .../Microsoft/hcsshim/internal/guid/guid.go | 69 - .../hcsshim/internal/hcs/callback.go | 107 +- .../Microsoft/hcsshim/internal/hcs/cgo.go | 7 - .../Microsoft/hcsshim/internal/hcs/errors.go | 108 +- .../Microsoft/hcsshim/internal/hcs/hcs.go | 48 - .../Microsoft/hcsshim/internal/hcs/log.go | 20 - .../Microsoft/hcsshim/internal/hcs/process.go | 536 +- .../internal/{ => hcs}/schema1/schema1.go | 21 +- .../internal/{ => hcs}/schema2/attachment.go | 7 +- .../internal/{ => hcs}/schema2/battery.go | 0 .../schema2/cache_query_stats_response.go | 1 - .../internal/{ => hcs}/schema2/chipset.go | 0 .../{ => hcs}/schema2/close_handle.go | 1 - .../internal/{ 
=> hcs}/schema2/com_port.go | 1 - .../{ => hcs}/schema2/compute_system.go | 3 +- .../{ => hcs}/schema2/configuration.go | 32 +- .../{ => hcs}/schema2/console_size.go | 1 - .../internal/{ => hcs}/schema2/container.go | 3 +- ...r_credential_guard_add_instance_request.go | 16 + ...edential_guard_hv_socket_service_config.go | 15 + .../container_credential_guard_instance.go | 16 + ...ainer_credential_guard_modify_operation.go | 17 + ...iner_credential_guard_operation_request.go | 15 + ...edential_guard_remove_instance_request.go} | 8 +- .../container_credential_guard_state.go | 0 .../container_credential_guard_system_info.go | 14 + .../schema2/container_memory_information.go | 1 - .../hcsshim/internal/hcs/schema2/cpu_group.go | 15 + .../hcs/schema2/cpu_group_affinity.go | 15 + .../internal/hcs/schema2/cpu_group_config.go | 18 + .../hcs/schema2/cpu_group_configurations.go | 15 + .../hcs/schema2/cpu_group_operations.go | 18 + .../hcs/schema2/cpu_group_property.go | 15 + .../hcs/schema2/create_group_operation.go | 17 + .../hcs/schema2/delete_group_operation.go | 15 + .../hcsshim/internal/hcs/schema2/device.go | 27 + .../internal/{ => hcs}/schema2/devices.go | 5 +- .../{ => hcs}/schema2/enhanced_mode_video.go | 1 - .../{ => hcs}/schema2/flexible_io_device.go | 1 - .../{ => hcs}/schema2/guest_connection.go | 0 .../schema2/guest_connection_info.go | 0 .../schema2/guest_crash_reporting.go | 1 - .../internal/{ => hcs}/schema2/guest_os.go | 1 - .../internal/{ => hcs}/schema2/guest_state.go | 0 .../schema2/host_processor_modify_request.go | 16 + .../{ => hcs}/schema2/hosted_system.go | 1 - .../internal/{ => hcs}/schema2/hv_socket.go | 1 - .../internal/{ => hcs}/schema2/hv_socket_2.go | 1 - .../internal/hcs/schema2/hv_socket_address.go | 17 + .../schema2/hv_socket_service_config.go | 6 + .../schema2/hv_socket_system_config.go | 0 .../hcs/schema2/interrupt_moderation_mode.go | 42 + .../internal/hcs/schema2/iov_settings.go | 22 + .../internal/{ => hcs}/schema2/keyboard.go | 0 .../internal/{ => hcs}/schema2/layer.go | 1 - .../{ => hcs}/schema2/linux_kernel_direct.go | 0 .../internal/hcs/schema2/logical_processor.go | 18 + .../{ => hcs}/schema2/mapped_directory.go | 1 - .../internal/{ => hcs}/schema2/mapped_pipe.go | 1 - .../internal/{ => hcs}/schema2/memory.go | 3 +- .../hcsshim/internal/hcs/schema2/memory_2.go | 49 + .../schema2/memory_information_for_vm.go | 3 +- .../{ => hcs}/schema2/memory_stats.go | 7 +- .../model_container_definition_device.go | 14 + .../hcs/schema2/model_device_category.go | 15 + .../hcs/schema2/model_device_extension.go | 15 + .../hcs/schema2/model_device_instance.go | 17 + .../hcs/schema2/model_device_namespace.go | 16 + .../hcs/schema2/model_interface_class.go | 16 + .../internal/hcs/schema2/model_namespace.go | 15 + .../hcs/schema2/model_object_directory.go | 18 + .../hcs/schema2/model_object_namespace.go | 16 + .../hcs/schema2/model_object_symlink.go | 18 + .../hcs/schema2/modification_request.go | 15 + .../schema2/modify_setting_request.go | 0 .../internal/{ => hcs}/schema2/mouse.go | 0 .../{ => hcs}/schema2/network_adapter.go | 4 +- .../internal/{ => hcs}/schema2/networking.go | 1 - .../{ => hcs}/schema2/pause_notification.go | 1 - .../{ => hcs}/schema2/pause_options.go | 1 - .../internal/{ => hcs}/schema2/plan9.go | 1 - .../internal/{ => hcs}/schema2/plan9_share.go | 3 +- .../{ => hcs}/schema2/process_details.go | 1 - .../schema2/process_modify_request.go | 1 - .../{ => hcs}/schema2/process_parameters.go | 1 - .../{ => hcs}/schema2/process_status.go | 1 - .../internal/{ => 
hcs}/schema2/processor.go | 1 - .../internal/{ => hcs}/schema2/processor_2.go | 6 +- .../{ => hcs}/schema2/processor_stats.go | 7 +- .../hcs/schema2/processor_topology.go | 15 + .../internal/{ => hcs}/schema2/properties.go | 9 +- .../{ => hcs}/schema2/property_query.go | 5 +- .../internal/hcs/schema2/property_type.go | 26 + .../schema2/rdp_connection_options.go | 1 - .../{ => hcs}/schema2/registry_changes.go | 1 - .../{ => hcs}/schema2/registry_key.go | 1 - .../{ => hcs}/schema2/registry_value.go | 1 - .../{ => hcs}/schema2/restore_state.go | 0 .../{ => hcs}/schema2/save_options.go | 0 .../internal/{ => hcs}/schema2/scsi.go | 0 .../hcs/schema2/service_properties.go | 18 + .../schema2/shared_memory_configuration.go | 1 - .../{ => hcs}/schema2/shared_memory_region.go | 1 - .../schema2/shared_memory_region_info.go | 1 - .../{ => hcs}/schema2/silo_properties.go | 1 - .../internal/{ => hcs}/schema2/statistics.go | 3 +- .../internal/{ => hcs}/schema2/storage.go | 0 .../{ => hcs}/schema2/storage_qo_s.go | 1 - .../{ => hcs}/schema2/storage_stats.go | 9 +- .../internal/{ => hcs}/schema2/topology.go | 1 - .../internal/{ => hcs}/schema2/uefi.go | 1 - .../{ => hcs}/schema2/uefi_boot_entry.go | 1 - .../internal/{ => hcs}/schema2/version.go | 1 - .../{ => hcs}/schema2/video_monitor.go | 1 - .../{ => hcs}/schema2/virtual_machine.go | 0 .../{ => hcs}/schema2/virtual_node_info.go | 1 - .../schema2/virtual_p_mem_controller.go | 0 .../{ => hcs}/schema2/virtual_p_mem_device.go | 1 - .../hcs/schema2/virtual_p_mem_mapping.go | 15 + .../hcs/schema2/virtual_pci_device.go | 16 + .../hcs/schema2/virtual_pci_function.go | 18 + .../internal/{ => hcs}/schema2/virtual_smb.go | 1 - .../{ => hcs}/schema2/virtual_smb_share.go | 1 - .../schema2/virtual_smb_share_options.go | 1 - .../internal/{ => hcs}/schema2/vm_memory.go | 5 +- .../hcs/schema2/vm_processor_limits.go | 22 + .../schema2/windows_crash_reporting.go | 1 - .../Microsoft/hcsshim/internal/hcs/service.go | 49 + .../Microsoft/hcsshim/internal/hcs/system.go | 720 +- .../Microsoft/hcsshim/internal/hcs/utils.go | 29 + .../hcsshim/internal/hcs/waithelper.go | 19 +- .../Microsoft/hcsshim/internal/hcs/watcher.go | 41 - .../hcsshim/internal/hns/hnsendpoint.go | 76 + .../hcsshim/internal/hns/hnsfuncs.go | 15 +- .../hcsshim/internal/hns/hnsnetwork.go | 16 +- .../hcsshim/internal/hns/hnspolicy.go | 45 +- .../hcsshim/internal/hns/namespace.go | 7 +- .../hcsshim/internal/interop/interop.go | 4 - .../Microsoft/hcsshim/internal/log/g.go | 23 + .../Microsoft/hcsshim/internal/oc/exporter.go | 43 + .../Microsoft/hcsshim/internal/oc/span.go | 17 + .../hcsshim/internal/regstate/regstate.go | 9 +- .../hcsshim/internal/runhcs/container.go | 4 +- .../hcsshim/internal/safefile/safeopen.go | 162 +- .../internal/safefile/zsyscall_windows.go | 79 - .../hcsshim/internal/schema2/memory_2.go | 25 - .../hcsshim/internal/timeout/timeout.go | 4 + .../hcsshim/internal/vmcompute/vmcompute.go | 610 + .../{hcs => vmcompute}/zsyscall_windows.go | 136 +- .../hcsshim/internal/wclayer/activatelayer.go | 25 +- .../hcsshim/internal/wclayer/baselayer.go | 29 +- .../hcsshim/internal/wclayer/createlayer.go | 28 +- .../internal/wclayer/createscratchlayer.go | 34 +- .../internal/wclayer/deactivatelayer.go | 23 +- .../hcsshim/internal/wclayer/destroylayer.go | 25 +- .../internal/wclayer/expandscratchsize.go | 138 +- .../hcsshim/internal/wclayer/exportlayer.go | 62 +- .../internal/wclayer/getlayermountpath.go | 38 +- .../internal/wclayer/getsharedbaseimages.go | 24 +- .../hcsshim/internal/wclayer/grantvmaccess.go 
| 28 +- .../hcsshim/internal/wclayer/importlayer.go | 75 +- .../hcsshim/internal/wclayer/layerexists.go | 27 +- .../hcsshim/internal/wclayer/layerid.go | 15 +- .../hcsshim/internal/wclayer/layerutils.go | 7 +- .../hcsshim/internal/wclayer/legacy.go | 42 +- .../hcsshim/internal/wclayer/nametoguid.go | 33 +- .../hcsshim/internal/wclayer/preparelayer.go | 29 +- .../hcsshim/internal/wclayer/processimage.go | 32 +- .../internal/wclayer/unpreparelayer.go | 25 +- .../hcsshim/internal/wclayer/wclayer.go | 10 +- .../internal/wclayer/zsyscall_windows.go | 59 + .../hcsshim/internal/winapi/console.go | 44 + .../hcsshim/internal/winapi/devices.go | 13 + .../hcsshim/internal/winapi/errors.go | 15 + .../hcsshim/internal/winapi/filesystem.go | 110 + .../Microsoft/hcsshim/internal/winapi/iocp.go | 3 + .../hcsshim/internal/winapi/jobobject.go | 215 + .../hcsshim/internal/winapi/logon.go | 30 + .../hcsshim/internal/winapi/memory.go | 4 + .../Microsoft/hcsshim/internal/winapi/net.go | 3 + .../Microsoft/hcsshim/internal/winapi/path.go | 11 + .../hcsshim/internal/winapi/process.go | 8 + .../hcsshim/internal/winapi/processor.go | 7 + .../hcsshim/internal/winapi/system.go | 52 + .../hcsshim/internal/winapi/thread.go | 12 + .../hcsshim/internal/winapi/utils.go | 80 + .../hcsshim/internal/winapi/winapi.go | 5 + .../internal/winapi/zsyscall_windows.go | 360 + vendor/github.com/Microsoft/hcsshim/layer.go | 47 +- .../hcsshim/osversion/osversion_windows.go | 50 + .../hcsshim/osversion/windowsbuilds.go | 50 + .../github.com/Microsoft/hcsshim/process.go | 38 +- .../github.com/Microsoft/hcsshim/vendor.conf | 21 - .../aws/aws-sdk-go/aws/client/client.go | 10 +- .../aws-sdk-go/aws/client/default_retryer.go | 28 +- .../aws/aws-sdk-go/aws/client/logger.go | 106 +- .../aws/client/metadata/client_info.go | 1 + .../github.com/aws/aws-sdk-go/aws/config.go | 22 + .../aws-sdk-go/aws/corehandlers/handlers.go | 28 +- .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 + .../aws-sdk-go/aws/credentials/credentials.go | 27 +- .../ec2rolecreds/ec2_role_provider.go | 6 +- .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 + .../aws/aws-sdk-go/aws/csm/enable.go | 67 + .../aws/aws-sdk-go/aws/csm/metric.go | 51 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 + .../aws/aws-sdk-go/aws/csm/reporter.go | 231 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 22 +- .../aws/aws-sdk-go/aws/ec2metadata/api.go | 8 +- .../aws/aws-sdk-go/aws/ec2metadata/service.go | 24 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 22 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 690 +- .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 22 +- .../aws/aws-sdk-go/aws/endpoints/v3model.go | 12 +- .../github.com/aws/aws-sdk-go/aws/logger.go | 6 + .../aws/aws-sdk-go/aws/request/handlers.go | 18 + .../aws-sdk-go/aws/request/offset_reader.go | 4 +- .../aws/aws-sdk-go/aws/request/request.go | 62 +- .../aws/aws-sdk-go/aws/request/request_1_7.go | 2 +- .../aws/aws-sdk-go/aws/request/request_1_8.go | 2 +- .../aws/request/request_pagination.go | 40 +- .../aws/aws-sdk-go/aws/request/retryer.go | 2 +- .../aws/aws-sdk-go/aws/session/doc.go | 2 +- .../aws/aws-sdk-go/aws/session/env_config.go | 28 + .../aws/aws-sdk-go/aws/session/session.go | 64 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 75 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 83 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 + .../internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 
23 + .../private/protocol/ec2query/build.go | 2 +- .../aws-sdk-go/private/protocol/payload.go | 81 + .../private/protocol/query/build.go | 2 +- .../protocol/query/queryutil/queryutil.go | 7 +- .../aws-sdk-go/private/protocol/rest/build.go | 12 +- .../private/protocol/rest/unmarshal.go | 6 +- .../aws-sdk-go/private/protocol/timestamp.go | 72 + .../private/protocol/xml/xmlutil/build.go | 30 +- .../private/protocol/xml/xmlutil/unmarshal.go | 20 +- .../protocol/xml/xmlutil/xml_to_struct.go | 1 + .../aws/aws-sdk-go/service/ec2/api.go | 6522 +- .../aws-sdk-go/service/ec2/customizations.go | 53 + .../aws/aws-sdk-go/service/ec2/doc.go | 20 +- .../aws/aws-sdk-go/service/ec2/service.go | 6 +- .../aws/aws-sdk-go/service/sts/api.go | 170 +- .../aws/aws-sdk-go/service/sts/service.go | 6 +- .../bronze1man/goStrongswanVici/go.mod | 7 - .../bronze1man/goStrongswanVici/go.sum | 6 - vendor/github.com/containerd/cgroups/LICENSE | 201 + .../containerd/cgroups/stats/v1/doc.go | 17 + .../containerd/cgroups/stats/v1/metrics.pb.go | 6125 ++ .../cgroups/stats/v1/metrics.pb.txt | 790 + .../containerd/cgroups/stats/v1/metrics.proto | 158 + .../coreos/etcd/client/keys.generated.go | 1087 - vendor/github.com/coreos/etcd/client/srv.go | 65 - .../coreos/etcd/pkg/transport/listener.go | 276 - .../coreos/go-iptables/iptables/iptables.go | 112 +- .../coreos/{etcd => go-semver}/LICENSE | 0 vendor/github.com/coreos/go-semver/NOTICE | 5 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + .../coreos/go-systemd/journal/journal.go | 143 +- .../github.com/coreos/pkg/capnslog/README.md | 2 +- vendor/github.com/coreos/pkg/capnslog/init.go | 2 +- .../github.com/coreos/pkg/capnslog/logmap.go | 5 + .../coreos/pkg/capnslog/pkg_logger.go | 14 + .../denverdino/aliyungo/common/client.go | 223 +- .../denverdino/aliyungo/common/endpoint.go | 114 +- .../denverdino/aliyungo/common/endpoints.xml | 21 + .../denverdino/aliyungo/common/regions.go | 27 +- .../denverdino/aliyungo/common/request.go | 6 +- .../denverdino/aliyungo/common/types.go | 18 + .../denverdino/aliyungo/ecs/client.go | 74 +- .../denverdino/aliyungo/ecs/disks.go | 43 +- .../github.com/denverdino/aliyungo/ecs/eni.go | 183 + .../denverdino/aliyungo/ecs/forward_entry.go | 16 +- .../denverdino/aliyungo/ecs/images.go | 43 +- .../denverdino/aliyungo/ecs/instance_types.go | 17 +- .../denverdino/aliyungo/ecs/instances.go | 293 +- .../denverdino/aliyungo/ecs/nat_gateway.go | 35 +- .../denverdino/aliyungo/ecs/networks.go | 54 +- .../denverdino/aliyungo/ecs/route_tables.go | 20 +- .../aliyungo/ecs/router_interface.go | 257 + .../aliyungo/ecs/security_groups.go | 51 +- .../denverdino/aliyungo/ecs/snapshots.go | 19 +- .../denverdino/aliyungo/ecs/snat_entry.go | 16 +- .../denverdino/aliyungo/ecs/ssh_key_pair.go | 27 +- .../denverdino/aliyungo/ecs/tags.go | 14 +- .../denverdino/aliyungo/ecs/vpcs.go | 20 +- .../denverdino/aliyungo/ecs/vrouters.go | 17 +- .../denverdino/aliyungo/ecs/vswitches.go | 17 +- .../denverdino/aliyungo/ecs/zones.go | 103 +- .../denverdino/aliyungo/metadata/client.go | 266 +- .../denverdino/aliyungo/util/encoding.go | 45 +- .../denverdino/aliyungo/util/signature.go | 1 - .../denverdino/aliyungo/util/util.go | 38 + vendor/github.com/go-logr/logr/go.mod | 3 - .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 
415 + .../gogo/protobuf/proto/text_parser.go | 2 +- .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 + .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + .../golang/protobuf/proto/registry.go | 10 +- .../golang/protobuf/proto/text_decode.go | 2 +- .../github.com/golang/protobuf/ptypes/any.go | 14 + .../github.com/golang/protobuf/ptypes/doc.go | 4 + .../golang/protobuf/ptypes/duration.go | 4 + .../golang/protobuf/ptypes/timestamp.go | 9 + vendor/github.com/google/gofuzz/go.mod | 3 - vendor/github.com/googleapis/gax-go/v2/go.mod | 3 - vendor/github.com/googleapis/gax-go/v2/go.sum | 25 - vendor/github.com/hashicorp/golang-lru/go.mod | 1 - .../github.com/imdario/mergo/.deepsource.toml | 12 + vendor/github.com/imdario/mergo/.travis.yml | 5 + vendor/github.com/imdario/mergo/README.md | 83 +- vendor/github.com/imdario/mergo/doc.go | 141 +- vendor/github.com/imdario/mergo/map.go | 4 + vendor/github.com/imdario/mergo/merge.go | 177 +- vendor/github.com/imdario/mergo/mergo.go | 21 +- .../github.com/jonboulle/clockwork/.gitignore | 2 + .../jonboulle/clockwork/.travis.yml | 5 - .../github.com/jonboulle/clockwork/README.md | 67 +- .../jonboulle/clockwork/clockwork.go | 28 +- .../github.com/jonboulle/clockwork/ticker.go | 72 + vendor/github.com/josharian/native/go.mod | 3 - vendor/github.com/json-iterator/go/go.mod | 11 - vendor/github.com/json-iterator/go/go.sum | 14 - .../github.com/json-iterator/go/iter_float.go | 3 + .../github.com/json-iterator/go/iter_int.go | 3 +- vendor/github.com/json-iterator/go/reflect.go | 2 +- .../go/reflect_json_raw_message.go | 24 +- .../go/reflect_struct_decoder.go | 5 + vendor/github.com/mdlayher/genetlink/go.mod | 10 - vendor/github.com/mdlayher/genetlink/go.sum | 22 - vendor/github.com/mdlayher/netlink/go.mod | 13 - vendor/github.com/mdlayher/netlink/go.sum | 87 - vendor/github.com/mdlayher/socket/go.mod | 9 - vendor/github.com/mdlayher/socket/go.sum | 12 - vendor/github.com/sirupsen/logrus/.gitignore | 3 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 20 +- .../github.com/sirupsen/logrus/CHANGELOG.md | 136 + vendor/github.com/sirupsen/logrus/README.md | 84 +- vendor/github.com/sirupsen/logrus/alt_exit.go | 18 +- .../github.com/sirupsen/logrus/appveyor.yml | 28 +- .../github.com/sirupsen/logrus/buffer_pool.go | 52 + vendor/github.com/sirupsen/logrus/entry.go | 339 +- vendor/github.com/sirupsen/logrus/exported.go | 93 +- .../github.com/sirupsen/logrus/formatter.go | 33 +- .../sirupsen/logrus/json_formatter.go | 69 +- vendor/github.com/sirupsen/logrus/logger.go | 291 +- vendor/github.com/sirupsen/logrus/logrus.go | 75 +- .../sirupsen/logrus/terminal_bsd.go | 10 - .../logrus/terminal_check_appengine.go | 2 +- .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 7 + .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 6 +- .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 27 + .../sirupsen/logrus/terminal_linux.go | 14 - .../sirupsen/logrus/text_formatter.go | 226 +- vendor/github.com/sirupsen/logrus/writer.go | 8 + vendor/github.com/spf13/pflag/go.mod | 3 - vendor/github.com/spf13/pflag/go.sum | 0 vendor/github.com/ugorji/go/codec/0doc.go | 199 - vendor/github.com/ugorji/go/codec/README.md | 148 - 
vendor/github.com/ugorji/go/codec/binc.go | 929 - vendor/github.com/ugorji/go/codec/cbor.go | 592 - vendor/github.com/ugorji/go/codec/decode.go | 2053 - .../github.com/ugorji/go/codec/decode_go.go | 16 - .../github.com/ugorji/go/codec/decode_go14.go | 14 - vendor/github.com/ugorji/go/codec/encode.go | 1461 - .../ugorji/go/codec/fast-path.generated.go | 39352 ---------- .../ugorji/go/codec/fast-path.go.tmpl | 527 - .../ugorji/go/codec/fast-path.not.go | 34 - .../ugorji/go/codec/gen-dec-array.go.tmpl | 104 - .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 - .../ugorji/go/codec/gen-helper.generated.go | 243 - .../ugorji/go/codec/gen-helper.go.tmpl | 372 - .../ugorji/go/codec/gen.generated.go | 175 - vendor/github.com/ugorji/go/codec/gen.go | 2014 - vendor/github.com/ugorji/go/codec/gen_15.go | 12 - vendor/github.com/ugorji/go/codec/gen_16.go | 12 - vendor/github.com/ugorji/go/codec/gen_17.go | 10 - vendor/github.com/ugorji/go/codec/helper.go | 1314 - .../ugorji/go/codec/helper_internal.go | 242 - .../ugorji/go/codec/helper_not_unsafe.go | 20 - .../ugorji/go/codec/helper_unsafe.go | 49 - vendor/github.com/ugorji/go/codec/json.go | 1234 - vendor/github.com/ugorji/go/codec/msgpack.go | 852 - vendor/github.com/ugorji/go/codec/noop.go | 213 - vendor/github.com/ugorji/go/codec/prebuild.go | 3 - vendor/github.com/ugorji/go/codec/prebuild.sh | 199 - vendor/github.com/ugorji/go/codec/rpc.go | 180 - vendor/github.com/ugorji/go/codec/simple.go | 526 - .../ugorji/go/codec/test-cbor-goldens.json | 639 - vendor/github.com/ugorji/go/codec/test.py | 126 - vendor/github.com/ugorji/go/codec/tests.sh | 102 - vendor/github.com/ugorji/go/codec/time.go | 233 - .../github.com/vishvananda/netlink/.gitignore | 1 + .../vishvananda/netlink/.travis.yml | 9 +- vendor/github.com/vishvananda/netlink/addr.go | 1 + .../vishvananda/netlink/addr_linux.go | 106 +- .../vishvananda/netlink/bridge_linux.go | 5 +- .../github.com/vishvananda/netlink/class.go | 60 +- .../vishvananda/netlink/class_linux.go | 29 +- .../vishvananda/netlink/conntrack_linux.go | 152 +- .../vishvananda/netlink/devlink_linux.go | 272 + .../github.com/vishvananda/netlink/filter.go | 113 +- .../vishvananda/netlink/filter_linux.go | 173 +- .../vishvananda/netlink/fou_linux.go | 6 +- .../vishvananda/netlink/genetlink_linux.go | 3 + .../vishvananda/netlink/handle_linux.go | 2 +- .../vishvananda/netlink/handle_unspecified.go | 14 +- .../vishvananda/netlink/inet_diag.go | 30 + .../vishvananda/netlink/ioctl_linux.go | 10 +- .../vishvananda/netlink/ipset_linux.go | 335 + vendor/github.com/vishvananda/netlink/link.go | 261 +- .../vishvananda/netlink/link_linux.go | 755 +- .../github.com/vishvananda/netlink/neigh.go | 1 + .../vishvananda/netlink/neigh_linux.go | 120 +- .../netlink/netlink_unspecified.go | 18 +- .../vishvananda/netlink/netns_linux.go | 4 +- .../vishvananda/netlink/nl/addr_linux.go | 14 +- .../vishvananda/netlink/nl/conntrack_linux.go | 18 +- .../vishvananda/netlink/nl/devlink_linux.go | 40 + .../vishvananda/netlink/nl/ipset_linux.go | 222 + .../vishvananda/netlink/nl/link_linux.go | 106 +- .../vishvananda/netlink/nl/nl_linux.go | 67 +- .../vishvananda/netlink/nl/parse_attr.go | 67 + .../vishvananda/netlink/nl/rdma_link_linux.go | 8 +- .../vishvananda/netlink/nl/syscall.go | 17 +- .../vishvananda/netlink/nl/tc_linux.go | 207 +- .../vishvananda/netlink/nl/xfrm_linux.go | 62 +- .../netlink/nl/xfrm_state_linux.go | 2 +- .../github.com/vishvananda/netlink/qdisc.go | 26 +- .../vishvananda/netlink/qdisc_linux.go | 41 +- .../vishvananda/netlink/rdma_link_linux.go 
| 165 +- .../github.com/vishvananda/netlink/route.go | 51 +- .../vishvananda/netlink/route_linux.go | 153 +- vendor/github.com/vishvananda/netlink/rule.go | 14 + .../vishvananda/netlink/rule_linux.go | 68 +- .../vishvananda/netlink/socket_linux.go | 97 +- vendor/github.com/vishvananda/netlink/tcp.go | 18 + .../vishvananda/netlink/tcp_linux.go | 393 + .../vishvananda/netlink/xfrm_monitor_linux.go | 6 +- .../vishvananda/netlink/xfrm_policy.go | 5 +- .../vishvananda/netlink/xfrm_policy_linux.go | 8 + .../vishvananda/netlink/xfrm_state.go | 6 +- .../vishvananda/netlink/xfrm_state_linux.go | 14 + vendor/github.com/vishvananda/netns/README.md | 1 - vendor/github.com/vishvananda/netns/netns.go | 19 +- .../vishvananda/netns/netns_linux.go | 88 +- vendor/go.etcd.io/etcd/LICENSE | 202 + .../coreos => go.etcd.io}/etcd/NOTICE | 0 .../etcd/client/README.md | 15 +- .../etcd/client/auth_role.go | 3 +- .../etcd/client/auth_user.go | 3 +- .../etcd/client/cancelreq.go | 0 .../etcd/client/client.go | 68 +- .../etcd/client/cluster_error.go | 0 .../coreos => go.etcd.io}/etcd/client/curl.go | 0 vendor/go.etcd.io/etcd/client/discover.go | 40 + .../coreos => go.etcd.io}/etcd/client/doc.go | 6 +- vendor/go.etcd.io/etcd/client/json.go | 72 + .../coreos => go.etcd.io}/etcd/client/keys.go | 17 +- .../etcd/client/members.go | 7 +- .../coreos => go.etcd.io}/etcd/client/util.go | 0 .../etcd/pkg/fileutil/dir_unix.go | 5 + .../etcd/pkg/fileutil/dir_windows.go | 5 + .../etcd/pkg/fileutil/doc.go} | 11 +- .../etcd/pkg/fileutil/fileutil.go | 72 +- .../etcd/pkg/fileutil/lock.go | 0 .../etcd/pkg/fileutil/lock_flock.go | 0 .../etcd/pkg/fileutil/lock_linux.go | 11 +- .../etcd/pkg/fileutil/lock_plan9.go | 0 .../etcd/pkg/fileutil/lock_solaris.go | 0 .../etcd/pkg/fileutil/lock_unix.go | 0 .../etcd/pkg/fileutil/lock_windows.go | 2 +- .../etcd/pkg/fileutil/preallocate.go | 15 +- .../etcd/pkg/fileutil/preallocate_darwin.go | 22 + .../etcd/pkg/fileutil/preallocate_unix.go | 0 .../pkg/fileutil/preallocate_unsupported.go | 0 .../etcd/pkg/fileutil/purge.go | 30 +- .../go.etcd.io/etcd/pkg/fileutil/read_dir.go | 70 + .../etcd/pkg/fileutil/sync.go | 0 .../etcd/pkg/fileutil/sync_darwin.go | 0 .../etcd/pkg/fileutil/sync_linux.go | 0 .../etcd/pkg/pathutil/path.go | 0 vendor/go.etcd.io/etcd/pkg/srv/srv.go | 142 + .../etcd/pkg/tlsutil/cipher_suites.go | 51 + .../etcd/pkg/tlsutil/doc.go | 0 .../etcd/pkg/tlsutil/tlsutil.go | 1 + .../etcd/pkg/transport/doc.go | 0 .../etcd/pkg/transport/keepalive_listener.go | 2 +- .../etcd/pkg/transport/limit_listen.go | 0 .../go.etcd.io/etcd/pkg/transport/listener.go | 449 + .../etcd/pkg/transport/listener_tls.go | 272 + .../etcd/pkg/transport/timeout_conn.go | 0 .../etcd/pkg/transport/timeout_dialer.go | 0 .../etcd/pkg/transport/timeout_listener.go | 5 +- .../etcd/pkg/transport/timeout_transport.go | 0 .../etcd/pkg/transport/tls.go | 0 .../etcd/pkg/transport/transport.go | 0 .../etcd/pkg/transport/unix_listener.go | 4 +- .../etcd/pkg/types/doc.go | 0 .../etcd/pkg/types/id.go | 4 +- .../etcd/pkg/types/set.go | 21 +- .../etcd/pkg/types/slice.go | 0 .../etcd/pkg/types/urls.go | 0 .../etcd/pkg/types/urlsmap.go | 0 vendor/go.etcd.io/etcd/version/version.go | 56 + vendor/go.opencensus.io/go.mod | 15 - vendor/go.opencensus.io/go.sum | 73 - .../go.opencensus.io/plugin/ochttp/trace.go | 3 + vendor/go.uber.org/atomic/.codecov.yml | 19 + vendor/go.uber.org/atomic/.gitignore | 12 + vendor/go.uber.org/atomic/.travis.yml | 27 + vendor/go.uber.org/atomic/CHANGELOG.md | 76 + .../atomic/LICENSE.txt} | 13 +- 
vendor/go.uber.org/atomic/Makefile | 78 + vendor/go.uber.org/atomic/README.md | 63 + vendor/go.uber.org/atomic/bool.go | 81 + vendor/go.uber.org/atomic/bool_ext.go | 53 + vendor/go.uber.org/atomic/doc.go | 23 + vendor/go.uber.org/atomic/duration.go | 82 + vendor/go.uber.org/atomic/duration_ext.go | 40 + vendor/go.uber.org/atomic/error.go | 51 + vendor/go.uber.org/atomic/error_ext.go | 39 + vendor/go.uber.org/atomic/float64.go | 76 + vendor/go.uber.org/atomic/float64_ext.go | 47 + vendor/go.uber.org/atomic/gen.go | 26 + vendor/go.uber.org/atomic/int32.go | 102 + vendor/go.uber.org/atomic/int64.go | 102 + vendor/go.uber.org/atomic/nocmp.go | 35 + vendor/go.uber.org/atomic/string.go | 54 + vendor/go.uber.org/atomic/string_ext.go | 43 + vendor/go.uber.org/atomic/uint32.go | 102 + vendor/go.uber.org/atomic/uint64.go | 102 + vendor/go.uber.org/atomic/value.go | 31 + vendor/go.uber.org/multierr/.codecov.yml | 15 + vendor/go.uber.org/multierr/.gitignore | 4 + vendor/go.uber.org/multierr/.travis.yml | 23 + vendor/go.uber.org/multierr/CHANGELOG.md | 60 + vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/Makefile | 42 + vendor/go.uber.org/multierr/README.md | 23 + vendor/go.uber.org/multierr/error.go | 449 + vendor/go.uber.org/multierr/glide.yaml | 8 + vendor/go.uber.org/multierr/go113.go | 52 + vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 32 + vendor/go.uber.org/zap/.readme.tmpl | 109 + vendor/go.uber.org/zap/CHANGELOG.md | 460 + vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 + vendor/go.uber.org/zap/CONTRIBUTING.md | 75 + vendor/go.uber.org/zap/FAQ.md | 164 + vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 73 + vendor/go.uber.org/zap/README.md | 134 + vendor/go.uber.org/zap/array.go | 320 + vendor/go.uber.org/zap/buffer/buffer.go | 123 + vendor/go.uber.org/zap/buffer/pool.go | 49 + vendor/go.uber.org/zap/checklicense.sh | 17 + vendor/go.uber.org/zap/config.go | 264 + vendor/go.uber.org/zap/doc.go | 113 + vendor/go.uber.org/zap/encoder.go | 79 + vendor/go.uber.org/zap/error.go | 80 + vendor/go.uber.org/zap/field.go | 549 + vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/glide.yaml | 34 + vendor/go.uber.org/zap/global.go | 168 + vendor/go.uber.org/zap/global_go112.go | 26 + vendor/go.uber.org/zap/global_prego112.go | 26 + vendor/go.uber.org/zap/http_handler.go | 132 + .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 64 + vendor/go.uber.org/zap/level.go | 132 + vendor/go.uber.org/zap/logger.go | 345 + vendor/go.uber.org/zap/options.go | 140 + vendor/go.uber.org/zap/sink.go | 161 + vendor/go.uber.org/zap/stacktrace.go | 85 + vendor/go.uber.org/zap/sugar.go | 315 + vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 99 + .../zap/zapcore/console_encoder.go | 161 + vendor/go.uber.org/zap/zapcore/core.go | 113 + vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 443 + vendor/go.uber.org/zap/zapcore/entry.go | 264 + vendor/go.uber.org/zap/zapcore/error.go | 132 + vendor/go.uber.org/zap/zapcore/field.go | 233 + vendor/go.uber.org/zap/zapcore/hook.go | 68 + .../go.uber.org/zap/zapcore/increase_level.go | 66 + .../go.uber.org/zap/zapcore/json_encoder.go | 534 + vendor/go.uber.org/zap/zapcore/level.go | 175 + .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 61 + .../go.uber.org/zap/zapcore/memory_encoder.go | 
179 + vendor/go.uber.org/zap/zapcore/sampler.go | 208 + vendor/go.uber.org/zap/zapcore/tee.go | 81 + .../go.uber.org/zap/zapcore/write_syncer.go | 122 + vendor/golang.org/x/oauth2/README.md | 13 +- vendor/golang.org/x/oauth2/go.mod | 10 - vendor/golang.org/x/oauth2/go.sum | 12 - .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 + vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 + vendor/golang.org/x/sys/plan9/race.go | 1 + vendor/golang.org/x/sys/plan9/race0.go | 1 + vendor/golang.org/x/sys/plan9/str.go | 1 + vendor/golang.org/x/sys/plan9/syscall.go | 1 + .../golang.org/x/sys/plan9/syscall_plan9.go | 6 +- .../x/sys/plan9/zsyscall_plan9_386.go | 1 + .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 + .../x/sys/plan9/zsyscall_plan9_arm.go | 1 + vendor/golang.org/x/sys/unix/README.md | 2 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 5 +- .../golang.org/x/sys/unix/sockcmsg_linux.go | 49 + vendor/golang.org/x/sys/unix/syscall_aix.go | 28 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 24 +- .../golang.org/x/sys/unix/syscall_darwin.go | 27 +- .../x/sys/unix/syscall_dragonfly.go | 10 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 6 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 58 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 14 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 6 +- .../golang.org/x/sys/unix/syscall_solaris.go | 28 +- .../x/sys/unix/syscall_zos_s390x.go | 22 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 47 +- .../x/sys/unix/zerrors_linux_386.go | 3 +- .../x/sys/unix/zerrors_linux_amd64.go | 3 +- .../x/sys/unix/zerrors_linux_arm.go | 3 +- .../x/sys/unix/zerrors_linux_arm64.go | 3 +- .../x/sys/unix/zerrors_linux_mips.go | 3 +- .../x/sys/unix/zerrors_linux_mips64.go | 3 +- .../x/sys/unix/zerrors_linux_mips64le.go | 3 +- .../x/sys/unix/zerrors_linux_mipsle.go | 3 +- .../x/sys/unix/zerrors_linux_ppc.go | 3 +- .../x/sys/unix/zerrors_linux_ppc64.go | 3 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 3 +- .../x/sys/unix/zerrors_linux_riscv64.go | 3 +- .../x/sys/unix/zerrors_linux_s390x.go | 3 +- .../x/sys/unix/zerrors_linux_sparc64.go | 3 +- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 22 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 20 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 20 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 17 +- .../x/sys/unix/zsyscall_netbsd_386.go | 12 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 12 - .../x/sys/unix/zsyscall_netbsd_arm.go | 12 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 12 - .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 2 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../x/sys/unix/ztypes_darwin_amd64.go | 6 +- .../x/sys/unix/ztypes_darwin_arm64.go | 6 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 93 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 +- .../x/sys/unix/ztypes_linux_amd64.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 +- .../x/sys/unix/ztypes_linux_arm64.go | 2 +- 
.../x/sys/unix/ztypes_linux_mips.go | 2 +- .../x/sys/unix/ztypes_linux_mips64.go | 2 +- .../x/sys/unix/ztypes_linux_mips64le.go | 2 +- .../x/sys/unix/ztypes_linux_mipsle.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 2 +- .../x/sys/unix/ztypes_linux_s390x.go | 2 +- .../x/sys/unix/ztypes_linux_sparc64.go | 2 +- .../x/sys/unix/ztypes_openbsd_386.go | 11 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 11 +- .../x/sys/unix/ztypes_openbsd_arm.go | 11 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 11 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 11 +- .../golang.org/x/sys/windows/exec_windows.go | 37 +- vendor/golang.org/x/sys/windows/mksyscall.go | 2 +- .../golang.org/x/sys/windows/registry/key.go | 9 + .../x/sys/windows/registry/mksyscall.go | 1 + .../x/sys/windows/registry/syscall.go | 1 + .../x/sys/windows/registry/value.go | 1 + vendor/golang.org/x/sys/windows/service.go | 12 +- .../x/sys/windows/setupapi_windows.go | 1425 + .../x/sys/windows/setupapierrors_windows.go | 100 - .../x/sys/windows/syscall_windows.go | 20 +- .../golang.org/x/sys/windows/types_windows.go | 112 +- .../x/sys/windows/zsyscall_windows.go | 335 + vendor/golang.org/x/term/go.mod | 5 - vendor/golang.org/x/term/go.sum | 2 - vendor/golang.org/x/time/rate/rate.go | 12 +- .../golang.zx2c4.com/wireguard/wgctrl/go.mod | 13 - .../golang.zx2c4.com/wireguard/wgctrl/go.sum | 98 - .../api/compute/v1/compute-api.json | 7148 +- .../api/compute/v1/compute-gen.go | 60463 +++++++++------- .../api/googleapi/googleapi.go | 2 +- .../api/internal/conn_pool.go | 30 + .../google.golang.org/api/internal/creds.go | 13 + .../api/internal/gensupport/version.go | 53 + vendor/google.golang.org/api/internal/pool.go | 51 - .../api/internal/settings.go | 17 + .../option/internaloption/internaloption.go | 26 + vendor/google.golang.org/api/option/option.go | 43 +- .../api/transport/cert/default_cert.go | 110 + .../api/transport/http/dial.go | 144 +- .../google.golang.org/appengine/.travis.yml | 2 - vendor/google.golang.org/appengine/go.mod | 9 - vendor/google.golang.org/appengine/go.sum | 11 - .../appengine/internal/api.go | 7 +- .../googleapis/rpc/status/status.pb.go | 14 +- vendor/google.golang.org/grpc/README.md | 20 + .../grpc/balancer/base/balancer.go | 87 +- .../grpc/balancer_conn_wrappers.go | 4 +- vendor/google.golang.org/grpc/clientconn.go | 104 +- .../grpc/credentials/credentials.go | 6 +- .../google.golang.org/grpc/credentials/tls.go | 18 +- vendor/google.golang.org/grpc/dialoptions.go | 12 + vendor/google.golang.org/grpc/go.mod | 16 - vendor/google.golang.org/grpc/go.sum | 53 - .../google.golang.org/grpc/grpclog/grpclog.go | 42 +- .../google.golang.org/grpc/grpclog/logger.go | 4 +- .../grpc/grpclog/loggerv2.go | 21 +- .../grpc/internal/channelz/funcs.go | 22 +- .../grpc/internal/channelz/logging.go | 100 + .../grpc/internal/grpclog/grpclog.go | 118 + .../grpc/internal/grpclog/prefixLogger.go | 63 + .../grpc/internal/grpcutil/target.go | 55 + .../grpc/internal/internal.go | 5 - .../grpc/internal/status/status.go | 166 + .../grpc/internal/transport/handler_server.go | 77 +- .../grpc/internal/transport/http2_client.go | 18 +- .../grpc/internal/transport/http2_server.go | 16 +- .../grpc/resolver_conn_wrapper.go | 55 +- vendor/google.golang.org/grpc/rpc_util.go | 18 +- vendor/google.golang.org/grpc/server.go | 114 +- vendor/google.golang.org/grpc/stats/stats.go | 19 +- 
.../google.golang.org/grpc/status/status.go | 119 +- vendor/google.golang.org/grpc/stream.go | 5 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 42 +- .../protobuf/encoding/prototext/decode.go | 98 +- .../protobuf/encoding/prototext/encode.go | 96 +- .../protobuf/internal/descfmt/stringer.go | 2 + .../protobuf/internal/detrand/rand.go | 8 + .../encoding/messageset/messageset.go | 33 +- .../protobuf/internal/encoding/tag/tag.go | 2 +- .../protobuf/internal/encoding/text/encode.go | 13 +- .../protobuf/internal/fieldnum/any_gen.go | 13 - .../protobuf/internal/fieldnum/api_gen.go | 35 - .../internal/fieldnum/descriptor_gen.go | 240 - .../protobuf/internal/fieldnum/doc.go | 7 - .../internal/fieldnum/duration_gen.go | 13 - .../protobuf/internal/fieldnum/empty_gen.go | 10 - .../internal/fieldnum/field_mask_gen.go | 12 - .../internal/fieldnum/source_context_gen.go | 12 - .../protobuf/internal/fieldnum/struct_gen.go | 33 - .../internal/fieldnum/timestamp_gen.go | 13 - .../protobuf/internal/fieldnum/type_gen.go | 53 - .../internal/fieldnum/wrappers_gen.go | 52 - .../protobuf/internal/fieldsort/fieldsort.go | 40 - .../protobuf/internal/filedesc/build.go | 19 +- .../protobuf/internal/filedesc/desc.go | 82 +- .../protobuf/internal/filedesc/desc_init.go | 62 +- .../protobuf/internal/filedesc/desc_lazy.go | 128 +- .../protobuf/internal/filedesc/desc_list.go | 176 +- .../internal/filedesc/desc_list_gen.go | 11 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/genname/name.go | 25 - .../protobuf/internal/impl/api_export.go | 9 +- .../protobuf/internal/impl/codec_field.go | 18 +- .../protobuf/internal/impl/codec_gen.go | 974 +- .../protobuf/internal/impl/codec_map.go | 24 +- .../protobuf/internal/impl/codec_message.go | 68 +- .../internal/impl/codec_messageset.go | 21 +- .../protobuf/internal/impl/codec_reflect.go | 8 +- .../protobuf/internal/impl/convert.go | 29 + .../protobuf/internal/impl/decode.go | 16 +- .../protobuf/internal/impl/encode.go | 10 +- .../protobuf/internal/impl/legacy_export.go | 2 +- .../internal/impl/legacy_extension.go | 3 +- .../protobuf/internal/impl/legacy_message.go | 129 +- .../protobuf/internal/impl/merge.go | 6 +- .../protobuf/internal/impl/message.go | 79 +- .../protobuf/internal/impl/message_reflect.go | 125 +- .../internal/impl/message_reflect_field.go | 85 +- .../protobuf/internal/impl/pointer_reflect.go | 1 + .../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/impl/validate.go | 5 +- .../protobuf/internal/mapsort/mapsort.go | 43 - .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/version/version.go | 4 +- .../protobuf/proto/decode.go | 23 +- .../protobuf/proto/decode_gen.go | 128 +- .../protobuf/proto/encode.go | 55 +- 
.../google.golang.org/protobuf/proto/equal.go | 25 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/proto.go | 9 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 + .../protobuf/reflect/protodesc/proto.go | 252 + .../protobuf/reflect/protoreflect/proto.go | 50 +- .../protobuf/reflect/protoreflect/source.go | 84 +- .../reflect/protoreflect/source_gen.go | 461 + .../protobuf/reflect/protoreflect/type.go | 34 + .../reflect/protoregistry/registry.go | 200 +- .../types/descriptorpb/descriptor.pb.go | 3957 + .../protobuf/types/known/anypb/any.pb.go | 229 +- .../types/known/durationpb/duration.pb.go | 150 +- .../types/known/timestamppb/timestamp.pb.go | 139 +- vendor/gopkg.in/yaml.v2/.travis.yml | 1 + vendor/gopkg.in/yaml.v2/apic.go | 5 + vendor/gopkg.in/yaml.v2/go.mod | 5 - vendor/gopkg.in/yaml.v2/yaml.go | 14 +- .../admissionregistration/v1/generated.pb.go | 50 +- .../admissionregistration/v1/generated.proto | 2 +- .../v1beta1/generated.pb.go | 50 +- .../v1beta1/generated.proto | 2 +- .../api/apiserverinternal/v1alpha1/doc.go | 25 + .../v1alpha1/generated.pb.go | 1700 + .../v1alpha1/generated.proto | 121 + .../v1alpha1/register.go | 16 +- .../api/apiserverinternal/v1alpha1/types.go | 127 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../v1alpha1/zz_generated.deepcopy.go | 175 + vendor/k8s.io/api/apps/v1/generated.pb.go | 140 +- vendor/k8s.io/api/apps/v1/generated.proto | 2 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 109 +- .../k8s.io/api/apps/v1beta1/generated.proto | 2 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 157 +- .../k8s.io/api/apps/v1beta2/generated.proto | 2 +- .../api/authentication/v1/generated.pb.go | 47 +- .../api/authentication/v1/generated.proto | 2 +- .../authentication/v1beta1/generated.pb.go | 27 +- .../authentication/v1beta1/generated.proto | 2 +- .../api/authorization/v1/generated.pb.go | 72 +- .../api/authorization/v1/generated.proto | 2 +- .../api/authorization/v1beta1/generated.pb.go | 72 +- .../api/authorization/v1beta1/generated.proto | 2 +- .../k8s.io/api/autoscaling/v1/generated.pb.go | 1020 +- .../k8s.io/api/autoscaling/v1/generated.proto | 85 +- vendor/k8s.io/api/autoscaling/v1/types.go | 81 +- .../v1/types_swagger_doc_generated.go | 50 +- .../autoscaling/v1/zz_generated.deepcopy.go | 58 + .../api/autoscaling/v2beta1/generated.pb.go | 996 +- .../api/autoscaling/v2beta1/generated.proto | 85 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 81 +- .../v2beta1/types_swagger_doc_generated.go | 50 +- .../v2beta1/zz_generated.deepcopy.go | 58 + .../api/autoscaling/v2beta2/generated.pb.go | 979 +- .../api/autoscaling/v2beta2/generated.proto | 65 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 65 +- .../v2beta2/types_swagger_doc_generated.go | 48 +- .../v2beta2/zz_generated.deepcopy.go | 44 + vendor/k8s.io/api/batch/v1/generated.pb.go | 25 +- vendor/k8s.io/api/batch/v1/generated.proto | 4 +- vendor/k8s.io/api/batch/v1/types.go | 2 + .../batch/v1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/batch/v1beta1/generated.pb.go | 30 +- .../k8s.io/api/batch/v1beta1/generated.proto | 2 +- .../k8s.io/api/batch/v2alpha1/generated.pb.go | 30 +- .../k8s.io/api/batch/v2alpha1/generated.proto | 2 +- .../api/certificates/v1/generated.pb.go | 32 +- .../api/certificates/v1/generated.proto | 2 +- .../api/certificates/v1beta1/generated.pb.go | 32 +- .../api/certificates/v1beta1/generated.proto | 2 +- 
.../api/coordination/v1/generated.pb.go | 15 +- .../api/coordination/v1/generated.proto | 2 +- .../api/coordination/v1beta1/generated.pb.go | 15 +- .../api/coordination/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/core/v1/generated.pb.go | 3775 +- vendor/k8s.io/api/core/v1/generated.proto | 229 +- vendor/k8s.io/api/core/v1/resource.go | 51 +- vendor/k8s.io/api/core/v1/types.go | 268 +- .../core/v1/types_swagger_doc_generated.go | 57 +- .../k8s.io/api/core/v1/well_known_labels.go | 14 +- .../api/core/v1/zz_generated.deepcopy.go | 64 +- .../api/discovery/v1alpha1/generated.pb.go | 242 +- .../api/discovery/v1alpha1/generated.proto | 31 +- vendor/k8s.io/api/discovery/v1alpha1/types.go | 28 +- .../v1alpha1/types_swagger_doc_generated.go | 11 +- .../v1alpha1/zz_generated.deepcopy.go | 15 + .../api/discovery/v1beta1/generated.pb.go | 241 +- .../api/discovery/v1beta1/generated.proto | 31 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 34 +- .../v1beta1/types_swagger_doc_generated.go | 11 +- .../v1beta1/zz_generated.deepcopy.go | 15 + vendor/k8s.io/api/events/v1/generated.pb.go | 15 +- vendor/k8s.io/api/events/v1/generated.proto | 18 +- vendor/k8s.io/api/events/v1/types.go | 19 +- .../events/v1/types_swagger_doc_generated.go | 8 +- .../k8s.io/api/events/v1beta1/generated.pb.go | 15 +- .../k8s.io/api/events/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/events/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 284 +- .../api/extensions/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go | 1 + .../api/flowcontrol/v1alpha1/generated.pb.go | 110 +- .../api/flowcontrol/v1alpha1/generated.proto | 2 +- .../k8s.io/api/flowcontrol/v1alpha1/types.go | 22 + .../zz_generated.prerelease-lifecycle.go | 121 + vendor/k8s.io/api/flowcontrol/v1beta1/doc.go | 25 + .../api/flowcontrol/v1beta1/generated.pb.go | 5367 ++ .../api/flowcontrol/v1beta1/generated.proto | 434 + .../api/flowcontrol/v1beta1/register.go | 58 + .../k8s.io/api/flowcontrol/v1beta1/types.go | 529 + .../v1beta1/types_swagger_doc_generated.go | 258 + .../v1beta1/zz_generated.deepcopy.go | 541 + .../zz_generated.prerelease-lifecycle.go | 93 + .../k8s.io/api/networking/v1/generated.pb.go | 115 +- .../k8s.io/api/networking/v1/generated.proto | 2 +- .../api/networking/v1beta1/generated.pb.go | 65 +- .../api/networking/v1beta1/generated.proto | 2 +- .../api/{settings/v1alpha1 => node/v1}/doc.go | 6 +- vendor/k8s.io/api/node/v1/generated.pb.go | 1399 + vendor/k8s.io/api/node/v1/generated.proto | 109 + vendor/k8s.io/api/node/v1/register.go | 52 + vendor/k8s.io/api/node/v1/types.go | 107 + .../node/v1/types_swagger_doc_generated.go | 71 + .../v1}/zz_generated.deepcopy.go | 99 +- .../k8s.io/api/node/v1alpha1/generated.pb.go | 29 +- .../k8s.io/api/node/v1alpha1/generated.proto | 6 +- vendor/k8s.io/api/node/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/node/v1beta1/generated.pb.go | 24 +- .../k8s.io/api/node/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/node/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 97 +- .../k8s.io/api/policy/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 60 +- vendor/k8s.io/api/rbac/v1/generated.proto | 2 +- .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 60 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 2 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 60 +- 
.../k8s.io/api/rbac/v1beta1/generated.proto | 2 +- .../k8s.io/api/scheduling/v1/generated.pb.go | 10 +- .../k8s.io/api/scheduling/v1/generated.proto | 2 +- .../api/scheduling/v1alpha1/generated.pb.go | 10 +- .../api/scheduling/v1alpha1/generated.proto | 2 +- .../api/scheduling/v1beta1/generated.pb.go | 10 +- .../api/scheduling/v1beta1/generated.proto | 2 +- .../api/settings/v1alpha1/generated.pb.go | 1053 - .../api/settings/v1alpha1/generated.proto | 75 - vendor/k8s.io/api/settings/v1alpha1/types.go | 70 - .../v1alpha1/types_swagger_doc_generated.go | 61 - vendor/k8s.io/api/storage/v1/generated.pb.go | 560 +- vendor/k8s.io/api/storage/v1/generated.proto | 52 +- vendor/k8s.io/api/storage/v1/types.go | 51 + .../storage/v1/types_swagger_doc_generated.go | 12 + .../api/storage/v1/zz_generated.deepcopy.go | 33 + .../api/storage/v1alpha1/generated.pb.go | 42 +- .../api/storage/v1alpha1/generated.proto | 2 +- .../api/storage/v1beta1/generated.pb.go | 563 +- .../api/storage/v1beta1/generated.proto | 52 +- vendor/k8s.io/api/storage/v1beta1/types.go | 51 + .../v1beta1/types_swagger_doc_generated.go | 12 + .../storage/v1beta1/zz_generated.deepcopy.go | 33 + .../apimachinery/pkg/api/meta/conditions.go | 1 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 4 - .../apimachinery/pkg/api/meta/restmapper.go | 3 + .../pkg/api/resource/generated.proto | 2 +- .../apimachinery/pkg/api/resource/quantity.go | 36 +- .../apimachinery/pkg/apis/meta/v1/OWNERS | 1 - .../pkg/apis/meta/v1/generated.pb.go | 216 +- .../pkg/apis/meta/v1/generated.proto | 3 +- .../pkg/apis/meta/v1/group_version.go | 9 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 14 + .../pkg/apis/meta/v1/micro_time.go | 15 - .../pkg/apis/meta/v1/micro_time_fuzz.go | 39 + .../apimachinery/pkg/apis/meta/v1/time.go | 15 - .../pkg/apis/meta/v1/time_fuzz.go | 39 + .../apimachinery/pkg/apis/meta/v1/types.go | 1 + .../pkg/apis/meta/v1/unstructured/helpers.go | 8 - .../pkg/apis/meta/v1beta1/generated.pb.go | 5 +- .../pkg/apis/meta/v1beta1/generated.proto | 2 +- .../apimachinery/pkg/conversion/converter.go | 598 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 19 - .../apimachinery/pkg/labels/selector.go | 50 +- .../apimachinery/pkg/runtime/converter.go | 3 + .../apimachinery/pkg/runtime/generated.pb.go | 15 +- .../apimachinery/pkg/runtime/generated.proto | 2 +- .../apimachinery/pkg/runtime/interfaces.go | 2 +- .../apimachinery/pkg/runtime/negotiate.go | 33 - .../pkg/runtime/schema/generated.proto | 2 +- .../pkg/runtime/schema/interfaces.go | 4 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 30 +- .../pkg/runtime/serializer/codec_factory.go | 12 +- .../pkg/runtime/serializer/json/json.go | 23 +- .../runtime/serializer/protobuf/protobuf.go | 4 + .../apimachinery/pkg/types/namespacedname.go | 6 +- .../apimachinery/pkg/util/cache/expiring.go | 2 +- .../apimachinery/pkg/util/clock/clock.go | 18 +- .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 3 + .../pkg/util/intstr/generated.pb.go | 5 +- .../pkg/util/intstr/generated.proto | 2 +- .../pkg/util/intstr/instr_fuzz.go | 42 + .../apimachinery/pkg/util/intstr/intstr.go | 70 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 33 +- .../apimachinery/pkg/util/net/port_range.go | 2 +- .../apimachinery/pkg/util/runtime/runtime.go | 4 +- .../pkg/util/validation/field/path.go | 3 + .../pkg/util/validation/validation.go | 6 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 29 + .../apimachinery/pkg/util/yaml/decoder.go | 31 + vendor/k8s.io/apimachinery/pkg/watch/mux.go | 65 +- 
.../apimachinery/pkg/watch/streamwatcher.go | 2 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 2 +- .../client-go/discovery/discovery_client.go | 2 +- .../k8s.io/client-go/kubernetes/clientset.go | 56 +- .../client-go/kubernetes/scheme/register.go | 8 +- .../v1alpha1/apiserverinternal_client.go} | 32 +- .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go | 2 +- .../v1alpha1/storageversion.go | 184 + .../typed/flowcontrol/v1beta1/doc.go | 20 + .../flowcontrol/v1beta1/flowcontrol_client.go | 94 + .../typed/flowcontrol/v1beta1/flowschema.go | 184 + .../v1beta1/generated_expansion.go | 23 + .../v1beta1/prioritylevelconfiguration.go | 184 + .../client-go/kubernetes/typed/node/v1/doc.go | 20 + .../typed/node/v1/generated_expansion.go | 21 + .../kubernetes/typed/node/v1/node_client.go | 89 + .../kubernetes/typed/node/v1/runtimeclass.go | 168 + .../typed/settings/v1alpha1/podpreset.go | 178 - .../pkg/apis/clientauthentication/types.go | 69 +- .../v1alpha1/conversion.go | 27 + .../clientauthentication/v1alpha1/types.go | 6 +- .../v1alpha1/zz_generated.conversion.go | 16 +- .../v1beta1/conversion.go | 8 +- .../clientauthentication/v1beta1/types.go | 74 +- .../v1beta1/zz_generated.conversion.go | 62 + .../v1beta1/zz_generated.deepcopy.go | 29 +- .../zz_generated.deepcopy.go | 29 + .../plugin/pkg/client/auth/exec/exec.go | 58 +- vendor/k8s.io/client-go/rest/config.go | 30 +- vendor/k8s.io/client-go/rest/exec.go | 85 + vendor/k8s.io/client-go/rest/request.go | 22 +- vendor/k8s.io/client-go/rest/transport.go | 11 +- vendor/k8s.io/client-go/rest/warnings.go | 7 +- .../k8s.io/client-go/tools/auth/clientauth.go | 4 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 1 - .../client-go/tools/cache/controller.go | 4 + .../client-go/tools/cache/delta_fifo.go | 34 +- .../k8s.io/client-go/tools/cache/reflector.go | 27 +- .../client-go/tools/cache/shared_informer.go | 4 +- .../client-go/tools/clientcmd/api/types.go | 42 +- .../client-go/tools/clientcmd/api/v1/types.go | 13 +- .../api/v1/zz_generated.conversion.go | 23 +- .../clientcmd/api/zz_generated.deepcopy.go | 3 + .../tools/clientcmd/client_config.go | 12 +- .../client-go/tools/clientcmd/config.go | 5 +- .../client-go/tools/clientcmd/loader.go | 4 + vendor/k8s.io/client-go/transport/cache.go | 2 +- vendor/k8s.io/client-go/transport/config.go | 8 +- .../client-go/transport/round_trippers.go | 8 +- vendor/k8s.io/klog/go.mod | 5 - vendor/k8s.io/klog/go.sum | 2 - vendor/k8s.io/klog/v2/README.md | 6 +- vendor/k8s.io/klog/v2/SECURITY.md | 22 + vendor/k8s.io/klog/v2/go.mod | 5 - vendor/k8s.io/klog/v2/go.sum | 2 - vendor/k8s.io/klog/v2/klog.go | 177 +- .../kube-openapi/pkg/util/proto/document.go | 62 +- .../kube-openapi/pkg/util/proto/openapi.go | 7 + vendor/modules.txt | 240 +- .../v4/value/reflectcache.go | 10 +- vendor/sigs.k8s.io/yaml/go.mod | 8 - vendor/sigs.k8s.io/yaml/go.sum | 9 - 1169 files changed, 123671 insertions(+), 100640 deletions(-) delete mode 100644 vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json create mode 100644 vendor/github.com/Microsoft/go-winio/CODEOWNERS create mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go create mode 100644 
vendor/github.com/Microsoft/go-winio/vhd/vhd.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/.gitattributes create mode 100644 vendor/github.com/Microsoft/hcsshim/.golangci.yml delete mode 100644 vendor/github.com/Microsoft/hcsshim/.gometalinter.json create mode 100644 vendor/github.com/Microsoft/hcsshim/CODEOWNERS create mode 100644 vendor/github.com/Microsoft/hcsshim/Makefile create mode 100644 vendor/github.com/Microsoft/hcsshim/Protobuild.toml delete mode 100644 vendor/github.com/Microsoft/hcsshim/appveyor.yml create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/attach.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/detach.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/export.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/format.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/import.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/mount.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/setup.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/storage.go create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema1/schema1.go (94%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/attachment.go (78%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/battery.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/cache_query_stats_response.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/chipset.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/close_handle.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/com_port.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/compute_system.go (91%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/configuration.go (67%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/console_size.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/container.go (90%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go rename vendor/github.com/Microsoft/hcsshim/internal/{schema2/device.go => hcs/schema2/container_credential_guard_remove_instance_request.go} (57%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/container_credential_guard_state.go (100%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/container_memory_information.go (99%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/devices.go (86%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/enhanced_mode_video.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/flexible_io_device.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/guest_connection.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/guest_connection_info.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/guest_crash_reporting.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/guest_os.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/guest_state.go (100%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/hosted_system.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/hv_socket.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/hv_socket_2.go (99%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/hv_socket_service_config.go (76%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/hv_socket_system_config.go (100%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/keyboard.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/layer.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/linux_kernel_direct.go (100%) create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/mapped_directory.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/mapped_pipe.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/memory.go (86%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/memory_information_for_vm.go (87%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/memory_stats.go (55%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/modify_setting_request.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/mouse.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/network_adapter.go (76%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/networking.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/pause_notification.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/pause_options.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/plan9.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/plan9_share.go (94%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/process_details.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/process_modify_request.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/process_parameters.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/process_status.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/processor.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/processor_2.go (73%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/processor_stats.go (62%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/properties.go (83%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/property_query.go (75%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go rename 
vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/rdp_connection_options.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/registry_changes.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/registry_key.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/registry_value.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/restore_state.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/save_options.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/scsi.go (100%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/shared_memory_configuration.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/shared_memory_region.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/shared_memory_region_info.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/silo_properties.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/statistics.go (92%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/storage.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/storage_qo_s.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/storage_stats.go (56%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/topology.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/uefi.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/uefi_boot_entry.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/version.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/video_monitor.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_machine.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_node_info.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_p_mem_controller.go (100%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_p_mem_device.go (99%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_smb.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_smb_share.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/virtual_smb_share_options.go (99%) rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/vm_memory.go (83%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go rename vendor/github.com/Microsoft/hcsshim/internal/{ => hcs}/schema2/windows_crash_reporting.go (99%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/g.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go 
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/span.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go rename vendor/github.com/Microsoft/hcsshim/internal/{hcs => vmcompute}/zsyscall_windows.go (74%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/vendor.conf create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go delete mode 100644 vendor/github.com/bronze1man/goStrongswanVici/go.mod delete mode 100644 vendor/github.com/bronze1man/goStrongswanVici/go.sum create mode 100644 vendor/github.com/containerd/cgroups/LICENSE create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/doc.go create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.proto delete mode 100644 
vendor/github.com/coreos/etcd/client/keys.generated.go delete mode 100644 vendor/github.com/coreos/etcd/client/srv.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener.go rename vendor/github.com/coreos/{etcd => go-semver}/LICENSE (100%) create mode 100644 vendor/github.com/coreos/go-semver/NOTICE create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/eni.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/router_interface.go delete mode 100644 vendor/github.com/go-logr/logr/go.mod create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go delete mode 100644 vendor/github.com/google/gofuzz/go.mod delete mode 100644 vendor/github.com/googleapis/gax-go/v2/go.mod delete mode 100644 vendor/github.com/googleapis/gax-go/v2/go.sum delete mode 100644 vendor/github.com/hashicorp/golang-lru/go.mod create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml delete mode 100644 vendor/github.com/jonboulle/clockwork/.travis.yml create mode 100644 vendor/github.com/jonboulle/clockwork/ticker.go delete mode 100644 vendor/github.com/josharian/native/go.mod delete mode 100644 vendor/github.com/json-iterator/go/go.mod delete mode 100644 vendor/github.com/json-iterator/go/go.sum delete mode 100644 vendor/github.com/mdlayher/genetlink/go.mod delete mode 100644 vendor/github.com/mdlayher/genetlink/go.sum delete mode 100644 vendor/github.com/mdlayher/netlink/go.mod delete mode 100644 vendor/github.com/mdlayher/netlink/go.sum delete mode 100644 vendor/github.com/mdlayher/socket/go.mod delete mode 100644 vendor/github.com/mdlayher/socket/go.sum create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_linux.go delete mode 100644 vendor/github.com/spf13/pflag/go.mod delete mode 100644 vendor/github.com/spf13/pflag/go.sum delete mode 100644 vendor/github.com/ugorji/go/codec/0doc.go delete mode 100644 vendor/github.com/ugorji/go/codec/README.md delete 
mode 100644 vendor/github.com/ugorji/go/codec/binc.go delete mode 100644 vendor/github.com/ugorji/go/codec/cbor.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode_go.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode_go14.go delete mode 100644 vendor/github.com/ugorji/go/codec/encode.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.not.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_15.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_16.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_17.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_internal.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_not_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/json.go delete mode 100644 vendor/github.com/ugorji/go/codec/msgpack.go delete mode 100644 vendor/github.com/ugorji/go/codec/noop.go delete mode 100644 vendor/github.com/ugorji/go/codec/prebuild.go delete mode 100644 vendor/github.com/ugorji/go/codec/prebuild.sh delete mode 100644 vendor/github.com/ugorji/go/codec/rpc.go delete mode 100644 vendor/github.com/ugorji/go/codec/simple.go delete mode 100644 vendor/github.com/ugorji/go/codec/test-cbor-goldens.json delete mode 100644 vendor/github.com/ugorji/go/codec/test.py delete mode 100644 vendor/github.com/ugorji/go/codec/tests.sh delete mode 100644 vendor/github.com/ugorji/go/codec/time.go create mode 100644 vendor/github.com/vishvananda/netlink/.gitignore create mode 100644 vendor/github.com/vishvananda/netlink/devlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/inet_diag.go create mode 100644 vendor/github.com/vishvananda/netlink/ipset_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/devlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/ipset_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/parse_attr.go create mode 100644 vendor/github.com/vishvananda/netlink/tcp.go create mode 100644 vendor/github.com/vishvananda/netlink/tcp_linux.go create mode 100644 vendor/go.etcd.io/etcd/LICENSE rename vendor/{github.com/coreos => go.etcd.io}/etcd/NOTICE (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/README.md (87%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/auth_role.go (99%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/auth_user.go (99%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/cancelreq.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/client.go (93%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/cluster_error.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/curl.go (100%) create mode 100644 
vendor/go.etcd.io/etcd/client/discover.go rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/doc.go (94%) create mode 100644 vendor/go.etcd.io/etcd/client/json.go rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/keys.go (98%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/members.go (98%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/client/util.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/dir_unix.go (87%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/dir_windows.go (92%) rename vendor/{github.com/coreos/etcd/client/discover.go => go.etcd.io/etcd/pkg/fileutil/doc.go} (69%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/fileutil.go (62%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock_flock.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock_linux.go (91%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock_plan9.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock_solaris.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock_unix.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/lock_windows.go (99%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/preallocate.go (82%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/preallocate_darwin.go (61%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/preallocate_unix.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/preallocate_unsupported.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/purge.go (59%) create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/sync.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/sync_darwin.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/fileutil/sync_linux.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/pathutil/path.go (100%) create mode 100644 vendor/go.etcd.io/etcd/pkg/srv/srv.go create mode 100644 vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/tlsutil/doc.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/tlsutil/tlsutil.go (99%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/doc.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/keepalive_listener.go (99%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/limit_listen.go (100%) create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/listener.go create mode 100644 vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_conn.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_dialer.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_listener.go (86%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/timeout_transport.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/tls.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/transport.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/transport/unix_listener.go (87%) rename 
vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/doc.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/id.go (98%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/set.go (88%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/slice.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/urls.go (100%) rename vendor/{github.com/coreos => go.etcd.io}/etcd/pkg/types/urlsmap.go (100%) create mode 100644 vendor/go.etcd.io/etcd/version/version.go delete mode 100644 vendor/go.opencensus.io/go.mod delete mode 100644 vendor/go.opencensus.io/go.sum create mode 100644 vendor/go.uber.org/atomic/.codecov.yml create mode 100644 vendor/go.uber.org/atomic/.gitignore create mode 100644 vendor/go.uber.org/atomic/.travis.yml create mode 100644 vendor/go.uber.org/atomic/CHANGELOG.md rename vendor/{github.com/ugorji/go/LICENSE => go.uber.org/atomic/LICENSE.txt} (84%) create mode 100644 vendor/go.uber.org/atomic/Makefile create mode 100644 vendor/go.uber.org/atomic/README.md create mode 100644 vendor/go.uber.org/atomic/bool.go create mode 100644 vendor/go.uber.org/atomic/bool_ext.go create mode 100644 vendor/go.uber.org/atomic/doc.go create mode 100644 vendor/go.uber.org/atomic/duration.go create mode 100644 vendor/go.uber.org/atomic/duration_ext.go create mode 100644 vendor/go.uber.org/atomic/error.go create mode 100644 vendor/go.uber.org/atomic/error_ext.go create mode 100644 vendor/go.uber.org/atomic/float64.go create mode 100644 vendor/go.uber.org/atomic/float64_ext.go create mode 100644 vendor/go.uber.org/atomic/gen.go create mode 100644 vendor/go.uber.org/atomic/int32.go create mode 100644 vendor/go.uber.org/atomic/int64.go create mode 100644 vendor/go.uber.org/atomic/nocmp.go create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/atomic/string_ext.go create mode 100644 vendor/go.uber.org/atomic/uint32.go create mode 100644 vendor/go.uber.org/atomic/uint64.go create mode 100644 vendor/go.uber.org/atomic/value.go create mode 100644 vendor/go.uber.org/multierr/.codecov.yml create mode 100644 vendor/go.uber.org/multierr/.gitignore create mode 100644 vendor/go.uber.org/multierr/.travis.yml create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/Makefile create mode 100644 vendor/go.uber.org/multierr/README.md create mode 100644 vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/multierr/glide.yaml create mode 100644 vendor/go.uber.org/multierr/go113.go create mode 100644 vendor/go.uber.org/zap/.codecov.yml create mode 100644 vendor/go.uber.org/zap/.gitignore create mode 100644 vendor/go.uber.org/zap/.readme.tmpl create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/zap/FAQ.md create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/Makefile create mode 100644 vendor/go.uber.org/zap/README.md create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100644 vendor/go.uber.org/zap/checklicense.sh create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create 
mode 100644 vendor/go.uber.org/zap/error.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/glide.yaml create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/global_go112.go create mode 100644 vendor/go.uber.org/zap/global_prego112.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 vendor/go.uber.org/zap/sink.go create mode 100644 vendor/go.uber.org/zap/stacktrace.go create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/error.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go delete mode 100644 vendor/golang.org/x/oauth2/go.mod delete mode 100644 vendor/golang.org/x/oauth2/go.sum create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go delete mode 100644 vendor/golang.org/x/term/go.mod delete mode 100644 vendor/golang.org/x/term/go.sum delete mode 100644 vendor/golang.zx2c4.com/wireguard/wgctrl/go.mod delete mode 100644 vendor/golang.zx2c4.com/wireguard/wgctrl/go.sum create mode 100644 vendor/google.golang.org/api/internal/conn_pool.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/version.go delete mode 100644 vendor/google.golang.org/api/internal/pool.go create mode 100644 vendor/google.golang.org/api/option/internaloption/internaloption.go create mode 100644 vendor/google.golang.org/api/transport/cert/default_cert.go delete mode 100644 vendor/google.golang.org/appengine/go.mod delete mode 100644 vendor/google.golang.org/appengine/go.sum delete mode 100644 vendor/google.golang.org/grpc/go.mod delete mode 100644 vendor/google.golang.org/grpc/go.sum create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 
vendor/google.golang.org/grpc/internal/status/status.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/doc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genname/name.go delete mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go delete mode 100644 vendor/gopkg.in/yaml.v2/go.mod create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go create mode 100644 
vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto rename vendor/k8s.io/api/{settings => apiserverinternal}/v1alpha1/register.go (73%) create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go rename vendor/k8s.io/api/{settings/v1alpha1 => node/v1}/doc.go (82%) create mode 100644 vendor/k8s.io/api/node/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1/register.go create mode 100644 vendor/k8s.io/api/node/v1/types.go create mode 100644 vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go rename vendor/k8s.io/api/{settings/v1alpha1 => node/v1}/zz_generated.deepcopy.go (54%) delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go rename vendor/k8s.io/client-go/kubernetes/typed/{settings/v1alpha1/settings_client.go => apiserverinternal/v1alpha1/apiserverinternal_client.go} (64%) rename vendor/k8s.io/client-go/kubernetes/typed/{settings => apiserverinternal}/v1alpha1/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{settings => apiserverinternal}/v1alpha1/generated_expansion.go (93%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go create mode 100644 
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go
create mode 100644 vendor/k8s.io/client-go/rest/exec.go
delete mode 100644 vendor/k8s.io/klog/go.mod
delete mode 100644 vendor/k8s.io/klog/go.sum
create mode 100644 vendor/k8s.io/klog/v2/SECURITY.md
delete mode 100644 vendor/k8s.io/klog/v2/go.mod
delete mode 100644 vendor/k8s.io/klog/v2/go.sum
delete mode 100644 vendor/sigs.k8s.io/yaml/go.mod
delete mode 100644 vendor/sigs.k8s.io/yaml/go.sum
diff --git a/go.mod b/go.mod
index 780d44309..d58aa23ff 100644
--- a/go.mod
+++ b/go.mod
@@ -1,37 +1,88 @@
 module github.com/flannel-io/flannel
 
-go 1.16
+go 1.17
+
+replace google.golang.org/grpc => google.golang.org/grpc v1.29.0
 
 require (
-	github.com/Microsoft/hcsshim v0.8.6
-	github.com/aws/aws-sdk-go v1.12.54
+	github.com/Microsoft/hcsshim v0.9.2
+	github.com/aws/aws-sdk-go v1.15.11
 	github.com/bronze1man/goStrongswanVici v0.0.0-20201105010758-936f38b697fd
-	github.com/containernetworking/plugins v0.8.6
-	github.com/coreos/etcd v3.1.11+incompatible
-	github.com/coreos/go-iptables v0.4.5
-	github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
-	github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf
-	github.com/denverdino/aliyungo v0.0.0-20170629053852-f6cab0c35083
+	github.com/containernetworking/plugins v0.9.1
+	github.com/coreos/go-iptables v0.5.0
+	github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
+	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
+	github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba
 	github.com/go-ini/ini v1.28.1 // indirect
-	github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/google/btree v1.0.1 // indirect
+	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 	github.com/joho/godotenv v0.0.0-20161216230537-726cc8b906e3
-	github.com/jonboulle/clockwork v0.1.0
-	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+	github.com/jonboulle/clockwork v0.2.2
 	github.com/pkg/errors v0.9.1
+	github.com/prometheus/client_golang v1.11.0 // indirect
 	github.com/smartystreets/goconvey v1.6.4 // indirect
-	github.com/stretchr/testify v1.6.1 // indirect
+	github.com/soheilhy/cmux v0.1.5 // indirect
 	github.com/tencentcloud/tencentcloud-sdk-go v1.0.67
-	github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7 // indirect
-	github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf
-	github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc
+	github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
+	github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
+	github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
+	go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489
+	go.uber.org/zap v1.17.0 // indirect
 	golang.org/x/net v0.0.0-20211020060615-d418f374d309
-	golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6
+	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
+	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
 	golang.zx2c4.com/wireguard v0.0.0-20211022192229-f1f626090e3b // indirect
 	golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211019185142-5be1d6054c42
-	google.golang.org/api v0.15.0
-	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
-	k8s.io/api v0.19.5
-	k8s.io/apimachinery v0.19.5
-	k8s.io/client-go v0.19.5
+	google.golang.org/api v0.22.0
+	google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
+	k8s.io/api v0.20.6
+	k8s.io/apimachinery v0.20.6
+	k8s.io/client-go v0.20.6
 	k8s.io/klog v1.0.0
 )
+
+require (
+	cloud.google.com/go v0.57.0 // indirect
+	github.com/Microsoft/go-winio v0.4.17 // indirect
+	github.com/containerd/cgroups v1.0.1 // indirect
+	github.com/coreos/go-semver v0.3.0 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/go-logr/logr v0.2.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+	github.com/googleapis/gnostic v0.4.1 // indirect
+	github.com/hashicorp/golang-lru v0.5.1 // indirect
+	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
+	github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
+	github.com/json-iterator/go v1.1.11 // indirect
+	github.com/mdlayher/genetlink v1.0.0 // indirect
+	github.com/mdlayher/netlink v1.4.1 // indirect
+	github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	go.opencensus.io v0.22.3 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
+	golang.org/x/text v0.3.6 // indirect
+	google.golang.org/appengine v1.6.6 // indirect
+	google.golang.org/grpc v1.40.0 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	k8s.io/klog/v2 v2.4.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd // indirect
+	k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
+	sigs.k8s.io/yaml v1.2.0 // indirect
+)
diff --git a/go.sum b/go.sum
index 8b986fff9..315b2119d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,61 +1,229 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=
-cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go
v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= 
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/aws/aws-sdk-go v1.12.54 h1:k9PjZW5+9Y4IomO0w1Qsz93+RD6S2cTp5fD+hib6tcE= -github.com/aws/aws-sdk-go v1.12.54/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11 h1:m45+Ru/wA+73cOZXiEGLDH2d9uLN3iHqMc0/z4noDXE= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bronze1man/goStrongswanVici v0.0.0-20201105010758-936f38b697fd h1:qn6a8rGrW+7p4ghypmYHZUKewXURuUDYxKqZxEoFjPc= github.com/bronze1man/goStrongswanVici v0.0.0-20201105010758-936f38b697fd/go.mod h1:fWUtBEPt2yjrr3WFhOqvajM8JSEU8bEeBcoeSCsKRpc= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= 
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= 
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.8.6 h1:npZTLiMa4CRn6m5P9+1Dz4O1j0UeFbm8VYN6dlsw568= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= 
+github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/coreos/etcd v3.1.11+incompatible h1:8Zwb+fsI+3wXGp23Pjn1eEFE/3P5ib5yXVoAMRxFOgM= -github.com/coreos/etcd v3.1.11+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38= +github.com/containernetworking/plugins v0.9.1 h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9r2Quag7HMLV8= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM= +github.com/coreos/go-iptables v0.5.0 h1:mw6SAibtHKZcNzAsOxjoHIG0gy5YFHhypWSSNc6EjbQ= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -63,64 +231,136 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denverdino/aliyungo v0.0.0-20170629053852-f6cab0c35083 h1:XW41NGZPNsS9w43o/fc+xQHudjvmGAFbGjN4r8VUkHM= -github.com/denverdino/aliyungo v0.0.0-20170629053852-f6cab0c35083/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize 
v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-ini/ini v1.28.1 h1:31infb1d2q1XtBq7msGogTnG0vODN/5poMiHg+cpXQw= github.com/go-ini/ini v1.28.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod 
h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -128,9 +368,15 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -138,24 +384,54 @@ github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyyc github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v0.0.0-20161216230537-726cc8b906e3 h1:zShOjUfrFegEHgln4TPkWk3KkN9sug3Es3Ml6YpgFJI= github.com/joho/godotenv 
v0.0.0-20161216230537-726cc8b906e3/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 h1:uhL5Gw7BINiiPAo24A2sxkcDI0Jt/sqp1v5xQCniEFA= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= @@ -166,22 +442,50 @@ github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9 github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190 h1:iycCSDo8EKVueI9sfVBBJmtNn9DnXV/K1YWwEJO+uOs= github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= @@ -199,180 +503,455 @@ github.com/mdlayher/netlink v1.4.1 h1:I154BCU+mKlIf7BgcAJB2r7QjveNPty6uNY1g9ChVf github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 h1:qEtkL8n1DAHpi5/AOgAckwGQUlMe4+jhL/GMt+GKIks= github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod 
h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.8.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod 
h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d 
h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability 
v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tencentcloud/tencentcloud-sdk-go v1.0.67 h1:fKSwJ7hrvHTxr33EcmrbKcavYJ/U2zNIH8Lvsj2FNTE= github.com/tencentcloud/tencentcloud-sdk-go v1.0.67/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= -github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7 h1:BPPUhSq7uU6E9lFzyb81vjwVOhiWwMXp0EpKL75NX+8= -github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf h1:3J37+NPjNyGW/dbfXtj3yWuF9OEepIdGOXRaJGbORV8= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= 
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo= golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -382,13 +961,37 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -399,36 +1002,56 @@ golang.zx2c4.com/wireguard v0.0.0-20211022192229-f1f626090e3b h1:hs4TGiP+lKhzFME golang.zx2c4.com/wireguard v0.0.0-20211022192229-f1f626090e3b/go.mod h1:RTjaYEQboNk7+2qfPGBotaMEh/5HIvmPZ6DIe10lTqI= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211019185142-5be1d6054c42 h1:0sqC9RNWUKz+9iUvfRZS6PYtJwpWqDwb/87NR2UpVcY= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211019185142-5be1d6054c42/go.mod h1:G0zJhHaavrPDNb/ygHzf4uju6nSlKMi4f1E5RCT3WpE= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.29.0 h1:2pJjwYOdkZ9HlN4sWRYBg9ttH5bCOlsueaM+b/oYjwo= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -437,54 +1060,102 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.19.5 h1:p0MRzyhokJ9Kn5jcJAHNup0s+COMBPfn1mTasls6mMg= -k8s.io/api v0.19.5/go.mod h1:yGZReuNa0vj56op6eT+NLrXJne0R0u9ktexZ8jdJzpc= -k8s.io/apimachinery v0.19.5 h1:Yvz6dOE0WbVE+FXBEFqc9lSvo87VPtq6mCSsrtC95HI= -k8s.io/apimachinery v0.19.5/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= -k8s.io/client-go v0.19.5 h1:Y7LsFwgbm9+5oVXER04KNCSPhY6TblYRgG1DQdVq+ig= -k8s.io/client-go v0.19.5/go.mod h1:BSG3iuxI40Bs0nNDLS1JRa/7ReBQDHzf0x8nZZrK0fo= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6 h1:R5p3SlhaABYShQSO6LpPsYHjV05Q+79eBUR0Ut/f4tk= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo 
v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json b/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json deleted file mode 100644 index ca022ccc4..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "metadata", - "name_pretty": "Google Compute Engine Metadata API", - "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata", - "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata", - "release_level": "ga", - "language": "go", - "repo": "googleapis/google-cloud-go", - "distribution_name": "cloud.google.com/go/compute/metadata", - "api_id": "compute:metadata", - 
"requires_billing": false -} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 0b50c2a7a..1a7a4c7e5 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -61,25 +61,14 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) +var defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, +}} // NotDefinedError is returned when requested metadata is not defined. // @@ -206,10 +195,9 @@ func systemInfoSuggestsGCE() bool { return name == "Google" || name == "Google Compute Engine" } -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). +// Subscribe calls Client.Subscribe on the default client. func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) + return defaultClient.Subscribe(suffix, fn) } // Get calls Client.Get on the default client. @@ -280,9 +268,14 @@ type Client struct { hc *http.Client } -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. +// NewClient returns a Client that can be used to fetch metadata. +// Returns the client that uses the specified http.Client for HTTP requests. +// If nil is specified, returns the default client. 
func NewClient(c *http.Client) *Client { + if c == nil { + return defaultClient + } + return &Client{hc: c} } diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 000000000..ae1b4942b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 4334ff1cb..0385e4108 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -16,6 +16,7 @@ import ( //sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort //sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult type atomicBool int32 @@ -79,6 +80,7 @@ type win32File struct { wg sync.WaitGroup wgLock sync.RWMutex closing atomicBool + socket bool readDeadline deadlineHandler writeDeadline deadlineHandler } @@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { } func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return makeWin32File(h) + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil } // closeHandle closes the resources associated with a Win32 handle @@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er if f.closing.isSet() { err = ErrFileClosed } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: cancelIoEx(f.handle, &c.o) @@ -265,6 +277,10 @@ func (f *win32File) Flush() error { return syscall.FlushFileBuffers(f.handle) } +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + func (d *deadlineHandler) set(deadline time.Time) error { d.setLock.Lock() defer d.setLock.Unlock() diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index ada2fbab6..3ab6bff69 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -5,21 +5,14 @@ package winio import ( "os" "runtime" - "syscall" "unsafe" -) - -//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx -//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle -const ( - fileBasicInfo = 0 - fileIDInfo = 0x12 + "golang.org/x/sys/windows" ) // FileBasicInfo contains file access time and file attributes information. 
type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 pad uint32 // padding } @@ -27,7 +20,7 @@ type FileBasicInfo struct { // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -36,13 +29,32 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) return nil } +// FileStandardInfo contains extended information for the file. +// FILE_STANDARD_INFO in WinBase.h +// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info +type FileStandardInfo struct { + AllocationSize, EndOfFile int64 + NumberOfLinks uint32 + DeletePending, Directory bool +} + +// GetFileStandardInfo retrieves ended information for the file. +func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + // FileIDInfo contains the volume serial number and file ID for a file. This pair should be // unique on a system. type FileIDInfo struct { @@ -53,7 +65,7 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file. 
func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 000000000..b632f8f8b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,307 @@ +// +build windows + +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. 
+func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. +func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. 
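Taken together, the hvsock additions above expose a net.Listener-style API over AF_HYPERV. A hedged usage sketch follows; the zero VMID and the port number are placeholders rather than values prescribed by the package:

// +build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	addr := &winio.HvsockAddr{
		VMID:      guid.GUID{},               // placeholder; a real VM ID in practice
		ServiceID: winio.VsockServiceID(400), // placeholder AF_VSOCK-style port
	}
	l, err := winio.ListenHvsock(addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		c, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func() {
			defer c.Close()
			c.Write([]byte("hello over AF_HYPERV\n"))
		}()
	}
}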
+func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) shutdown(how int) error { + err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, notifying the other endpoint that +// no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index d99eedb64..96700a73d 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -3,10 +3,13 @@ package winio import ( + "context" "errors" + "fmt" "io" "net" "os" + "runtime" "syscall" "time" "unsafe" @@ -18,6 +21,48 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl 
uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} const ( cERROR_PIPE_BUSY = syscall.Errno(231) @@ -25,21 +70,20 @@ const ( cERROR_PIPE_CONNECTED = syscall.Errno(535) cERROR_SEM_TIMEOUT = syscall.Errno(121) - cPIPE_ACCESS_DUPLEX = 0x3 - cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 - cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + cPIPE_TYPE_MESSAGE = 4 - cPIPE_UNLIMITED_INSTANCES = 255 + cPIPE_READMODE_MESSAGE = 2 - cNMPWAIT_USE_DEFAULT_WAIT = 0 - cNMPWAIT_NOWAIT = 1 + cFILE_OPEN = 1 + cFILE_CREATE = 2 - cPIPE_TYPE_MESSAGE = 4 + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - cPIPE_READMODE_MESSAGE = 2 + cSE_DACL_PRESENT = 4 ) var ( @@ -137,33 +181,60 @@ func (s pipeAddress) String() string { return string(s) } +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { + for { + + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + // DialPipe connects to a named pipe by path, timing out if the connection // takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 5 seconds. (We do not use WaitNamedPipe.) +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { var absTimeout time.Time if timeout != nil { absTimeout = time.Now().Add(*timeout) } else { - absTimeout = time.Now().Add(time.Second * 2) + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { var err error var h syscall.Handle - for { - h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != cERROR_PIPE_BUSY { - break - } - if time.Now().After(absTimeout) { - return nil, ErrTimeout - } - - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. 
- time.Sleep(time.Millisecond * 10) - } + h, err = tryDialPipe(ctx, &path, access) if err != nil { - return nil, &os.PathError{Op: "open", Path: path, Err: err} + return nil, err } var flags uint32 @@ -194,43 +265,87 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle - path string - securityDescriptor []byte - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int } -func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. if first { - flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } } - var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - mode |= cPIPE_TYPE_MESSAGE + typ |= cFILE_PIPE_MESSAGE_TYPE } - sa := &syscall.SecurityAttributes{} - sa.Length = uint32(unsafe.Sizeof(*sa)) - if securityDescriptor != nil { - len := uint32(len(securityDescriptor)) - sa.SecurityDescriptor = localAlloc(0, len) - defer localFree(sa.SecurityDescriptor) - copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. 
+ access = syscall.SYNCHRONIZE } - h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } + + runtime.KeepAlive(ntPath) return h, nil } func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) if err != nil { return nil, err } @@ -314,10 +429,10 @@ type PipeConfig struct { // when the pipe is in message mode. MessageMode bool - // InputBufferSize specifies the size the input buffer, in bytes. + // InputBufferSize specifies the size of the input buffer, in bytes. InputBufferSize int32 - // OutputBufferSize specifies the size the input buffer, in bytes. + // OutputBufferSize specifies the size of the output buffer, in bytes. OutputBufferSize int32 } @@ -341,32 +456,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { if err != nil { return nil, err } - // Create a client handle and connect it. This results in the pipe - // instance always existing, so that clients see ERROR_PIPE_BUSY - // rather than ERROR_FILE_NOT_FOUND. This ties the first instance - // up so that no other instances can be used. This would have been - // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe - // instead of CreateNamedPipe. (Apparently created named pipes are - // considered to be in listening state regardless of whether any - // active calls to ConnectNamedPipe are outstanding.) - h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != nil { - syscall.Close(h) - return nil, err - } - // Close the client handle. The server side of the instance will - // still be busy, leading to ERROR_PIPE_BUSY instead of - // ERROR_NOT_FOUND, as long as we don't close the server handle, - // or disconnect the client with DisconnectNamedPipe. - syscall.Close(h2) l := &win32PipeListener{ - firstHandle: h, - path: path, - securityDescriptor: sd, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), } go l.listenerRoutine() return l, nil diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 000000000..f497c0e39 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,237 @@ +// +build windows + +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. 
See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" + + "golang.org/x/sys/windows" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. 
+func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. +func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarhals it +// into this GUID. 
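The big-endian versus Windows (mixed-endian) encodings handled by FromArray/ToArray and FromWindowsArray/ToWindowsArray can be seen by round-tripping the service-ID template GUID used in hvsock.go; a small sketch:

// +build windows

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	g, err := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g)                                      // canonical string form
	fmt.Printf("big-endian: % x\n", g.ToArray())        // RFC 4122 byte order
	fmt.Printf("windows:    % x\n", g.ToWindowsArray()) // mixed-endian layout
	fmt.Println("variant:", g.Variant(), "version:", g.Version())
}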
+func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go new file mode 100644 index 000000000..fca241590 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -0,0 +1,161 @@ +// +build windows + +package security + +import ( + "os" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +type ( + accessMask uint32 + accessMode uint32 + desiredAccess uint32 + inheritMode uint32 + objectType uint32 + shareMode uint32 + securityInformation uint32 + trusteeForm uint32 + trusteeType uint32 + + explicitAccess struct { + accessPermissions accessMask + accessMode accessMode + inheritance inheritMode + trustee trustee + } + + trustee struct { + multipleTrustee *trustee + multipleTrusteeOperation int32 + trusteeForm trusteeForm + trusteeType trusteeType + name uintptr + } +) + +const ( + accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ + + accessModeGrant accessMode = 1 + + desiredAccessReadControl desiredAccess = 0x20000 + desiredAccessWriteDac desiredAccess = 0x40000 + + gvmga = "GrantVmGroupAccess:" + + inheritModeNoInheritance inheritMode = 0x0 + inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 + + objectTypeFileObject objectType = 0x1 + + securityInformationDACL securityInformation = 0x4 + + shareModeRead shareMode = 0x1 + shareModeWrite shareMode = 0x2 + + sidVmGroup = "S-1-5-83-0" + + trusteeFormIsSid trusteeForm = 0 + + trusteeTypeWellKnownGroup trusteeType = 5 +) + +// GrantVMGroupAccess sets the DACL for a specified file or directory to +// include Grant ACE entries for the VM Group SID. This is a golang re- +// implementation of the same function in vmcompute, just not exported in +// RS5. Which kind of sucks. Sucks a lot :/ +func GrantVmGroupAccess(name string) error { + // Stat (to determine if `name` is a directory). + s, err := os.Stat(name) + if err != nil { + return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) + } + + // Get a handle to the file/directory. Must defer Close on success. + fd, err := createFile(name, s.IsDir()) + if err != nil { + return err // Already wrapped + } + defer syscall.CloseHandle(fd) + + // Get the current DACL and Security Descriptor. Must defer LocalFree on success. + ot := objectTypeFileObject + si := securityInformationDACL + sd := uintptr(0) + origDACL := uintptr(0) + if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { + return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + + // Generate a new DACL which is the current DACL with the required ACEs added. + // Must defer LocalFree on success. + newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) + if err != nil { + return err // Already wrapped + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + + // And finally use SetSecurityInfo to apply the updated DACL. + if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { + return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) + } + + return nil +} + +// createFile is a helper function to call [Nt]CreateFile to get a handle to +// the file or directory. 
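GrantVmGroupAccess above is the package's exported surface: it appends a GENERIC_READ grant ACE for the VM group SID S-1-5-83-0 to the file's existing DACL. A minimal usage sketch, with a hypothetical VHDX path:

// +build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/pkg/security"
)

func main() {
	// Hypothetical path: the VHDX a utility VM will need to open.
	const vhdPath = `C:\vm\disk.vhdx`
	if err := security.GrantVmGroupAccess(vhdPath); err != nil {
		log.Fatalf("granting VM group access to %s: %v", vhdPath, err)
	}
}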
+func createFile(name string, isDir bool) (syscall.Handle, error) { + namep := syscall.StringToUTF16(name) + da := uint32(desiredAccessReadControl | desiredAccessWriteDac) + sm := uint32(shareModeRead | shareModeWrite) + fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + if isDir { + fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + } + fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) + if err != nil { + return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) + } + return fd, nil +} + +// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. +// The caller is responsible for LocalFree of the returned DACL on success. +func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { + // Generate pointers to the SIDs based on the string SIDs + sid, err := syscall.StringToSid(sidVmGroup) + if err != nil { + return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + } + + inheritance := inheritModeNoInheritance + if isDir { + inheritance = inheritModeSubContainersAndObjectsInherit + } + + eaArray := []explicitAccess{ + explicitAccess{ + accessPermissions: accessMaskDesiredPermission, + accessMode: accessModeGrant, + inheritance: inheritance, + trustee: trustee{ + trusteeForm: trusteeFormIsSid, + trusteeType: trusteeTypeWellKnownGroup, + name: uintptr(unsafe.Pointer(sid)), + }, + }, + } + + modifiedDACL := uintptr(0) + if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { + return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + } + + return modifiedDACL, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go new file mode 100644 index 000000000..c40c2739b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -0,0 +1,7 @@ +package security + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go + +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go new file mode 100644 index 000000000..4a90cb3cc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -0,0 +1,70 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package security + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") +) + +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go index 20d64cf41..5955c99fd 100644 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,3 @@ package winio -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 000000000..b03b789e6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,323 @@ +// +build windows + +package vhd + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go + +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle 
syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath + +type ( + CreateVirtualDiskFlag uint32 + VirtualDiskFlag uint32 + AttachVirtualDiskFlag uint32 + DetachVirtualDiskFlag uint32 + VirtualDiskAccessMask uint32 +) + +type VirtualStorageType struct { + DeviceID uint32 + VendorID guid.GUID +} + +type CreateVersion2 struct { + UniqueID guid.GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + PhysicalSectorSizeInByte uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType VirtualStorageType + SourceVirtualStorageType VirtualStorageType + ResiliencyGUID guid.GUID +} + +type CreateVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 CreateVersion2 +} + +type OpenVersion2 struct { + GetInfoOnly bool + ReadOnly bool + ResiliencyGUID guid.GUID +} + +type OpenVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 OpenVersion2 +} + +type AttachVersion2 struct { + RestrictedOffset uint64 + RestrictedLength uint64 +} + +type AttachVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 AttachVersion2 +} + +const ( + VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 + + // Access Mask for opening a VHD + VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 + VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 + VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 + VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 + VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 + VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 + VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 + VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 + VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 + VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 + + // Flags for creating a VHD + CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 + CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 + CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 + CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 + CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 + CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 + CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 + + // Flags for opening a VHD + OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 + OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 + OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 + OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 + 
OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 + OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 + OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 + OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 + OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 + OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 + OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 + + // Flags for attaching a VHD + AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 + AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 + AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 + AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 + AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 + AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 + AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 + AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 + AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 + AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 + AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 + + // Flags for detaching a VHD + DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 +) + +// CreateVhdx is a helper function to create a simple vhdx file at the given path using +// default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + params := CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) + if err != nil { + return err + } + + if err := syscall.CloseHandle(handle); err != nil { + return err + } + return nil +} + +// DetachVirtualDisk detaches a virtual hard disk by handle. +func DetachVirtualDisk(handle syscall.Handle) (err error) { + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return errors.Wrap(err, "failed to detach virtual disk") + } + return nil +} + +// DetachVhd detaches a vhd found at `path`. +func DetachVhd(path string) error { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + return DetachVirtualDisk(handle) +} + +// AttachVirtualDisk attaches a virtual hard disk for use. +func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { + // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. + if err := attachVirtualDisk( + handle, + nil, + uint32(attachVirtualDiskFlag), + 0, + parameters, + nil, + ); err != nil { + return errors.Wrap(err, "failed to attach virtual disk") + } + return nil +} + +// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 +// of the ATTACH_VIRTUAL_DISK_PARAMETERS. 
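The exported helpers in this file (CreateVhdx, AttachVhd, DetachVhd) compose into a simple create/attach/detach round trip. In the sketch below the path and sizes are placeholders, and attaching a disk normally requires an elevated process:

// +build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	// Placeholder path; CreateVhdx creates a 10 GB VHDX with a 1 MB block size
	// and closes the handle again.
	const path = `C:\vm\scratch.vhdx`
	if err := vhd.CreateVhdx(path, 10, 1); err != nil {
		log.Fatalf("create: %v", err)
	}

	// AttachVhd opens the file with cached I/O and attaches it using version 2
	// ATTACH_VIRTUAL_DISK_PARAMETERS; DetachVhd reverses the operation.
	if err := vhd.AttachVhd(path); err != nil {
		log.Fatalf("attach: %v", err)
	}
	if err := vhd.DetachVhd(path); err != nil {
		log.Fatalf("detach: %v", err)
	}
}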
+func AttachVhd(path string) (err error) { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + + defer syscall.CloseHandle(handle) + params := AttachVirtualDiskParameters{Version: 2} + if err := AttachVirtualDisk( + handle, + AttachVirtualDiskFlagNone, + ¶ms, + ); err != nil { + return errors.Wrap(err, "failed to attach virtual disk") + } + return nil +} + +// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. +func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { + parameters := OpenVirtualDiskParameters{Version: 2} + handle, err := OpenVirtualDiskWithParameters( + vhdPath, + virtualDiskAccessMask, + openVirtualDiskFlags, + ¶meters, + ) + if err != nil { + return 0, err + } + return handle, nil +} + +// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. +func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + if err := openVirtualDisk( + &defaultType, + vhdPath, + uint32(virtualDiskAccessMask), + uint32(openVirtualDiskFlags), + parameters, + &handle, + ); err != nil { + return 0, errors.Wrap(err, "failed to open virtual disk") + } + return handle, nil +} + +// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. +func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + + if err := createVirtualDisk( + &defaultType, + path, + uint32(virtualDiskAccessMask), + nil, + uint32(createVirtualDiskFlags), + 0, + parameters, + nil, + &handle, + ); err != nil { + return handle, errors.Wrap(err, "failed to create virtual disk") + } + return handle, nil +} + +// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical +// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer +// that represents the particular enumeration of the physical disk on the caller's system. +func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { + var ( + diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars + diskPhysicalPathBuf [256]uint16 + ) + if err := getVirtualDiskPhysicalPath( + handle, + &diskPathSizeInBytes, + &diskPhysicalPathBuf[0], + ); err != nil { + return "", errors.Wrap(err, "failed to get disk physical path") + } + return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil +} + +// CreateDiffVhd is a helper function to create a differencing virtual disk. +func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { + // Setting `ParentPath` is how to signal to create a differencing disk. 
+ createParams := &CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + ParentPath: windows.StringToUTF16Ptr(baseVhdPath), + BlockSizeInBytes: blockSizeInMB * 1024 * 1024, + OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), + }, + } + + vhdHandle, err := CreateVirtualDisk( + diffVhdPath, + VirtualDiskAccessNone, + CreateVirtualDiskFlagNone, + createParams, + ) + if err != nil { + return fmt.Errorf("failed to create differencing vhd: %s", err) + } + if err := syscall.CloseHandle(vhdHandle); err != nil { + return fmt.Errorf("failed to close differencing vhd handle: %s", err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go new file mode 100644 index 000000000..572f7b42f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -0,0 +1,106 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") + procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") + procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") +) + +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) +} + +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { + r1, _, e1 := 
syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 3f527639a..176ff75e3 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated by 'go generate'; DO NOT EDIT. 
package winio @@ -19,6 +19,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +27,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -37,484 +38,390 @@ func errnoErr(e syscall.Errno) error { } var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procLocalFree = modkernel32.NewProc("LocalFree") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") procBackupRead = modkernel32.NewProc("BackupRead") procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + 
procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procbind = modws2_32.NewProc("bind") ) -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) } return } -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), 
uintptr(flags), 0) +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(accountName) if err != nil { return } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, 
size *uint32, languageId *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(systemName) if err != nil { return } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) } -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func waitNamedPipe(name string, timeout uint32) (err error) { +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(systemName) if err != nil { return } - return _waitNamedPipe(_p0, timeout) + return _lookupPrivilegeName(_p0, luid, buffer, size) } -func _waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return } - return + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) } -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 
uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) + return } -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size 
*uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) + return } -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return } - return + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) } -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } return } -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } return } -func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, 
defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return } - return + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) } return } -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) return } -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), 
uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) return } -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return } -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return } -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) return } -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) 
{ - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return } -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) } return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 - } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) } return } diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes new file mode 100644 index 000000000..94f480de9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore index b883f1fdc..54ed6f06c 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -1 +1,38 @@ +# Binaries for programs and plugins *.exe +*.dll +*.so +*.dylib + +# Ignore vscode setting files +.vscode/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Ignore gcs bin directory +service/bin/ +service/pkg/ + +*.img +*.vhd +*.tar.gz + +# Make stuff +.rootfs-done +bin/* +rootfs/* +*.o +/build/ + +deps/* +out/* + +.idea/ +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 000000000..2400e7f1e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,99 @@ +run: + timeout: 8m + +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. + exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gometalinter.json b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json deleted file mode 100644 index 00e9a6e2e..000000000 --- a/vendor/github.com/Microsoft/hcsshim/.gometalinter.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Vendor": true, - "Deadline": "2m", - "Sort": [ - "linter", - "severity", - "path", - "line" - ], - "Skip": [ - "internal\\schema2" - ], - "EnableGC": true, - "Enable": [ - "gofmt" - ] -} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS 
b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS new file mode 100644 index 000000000..f4c5a07d1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS @@ -0,0 +1 @@ +* @microsoft/containerplat \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile new file mode 100644 index 000000000..a8f5516cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -0,0 +1,87 @@ +BASE:=base.tar.gz + +GO:=go +GO_FLAGS:=-ldflags "-s -w" # strip Go binaries +CGO_ENABLED:=0 +GOMODVENDOR:= + +CFLAGS:=-O2 -Wall +LDFLAGS:=-static -s # strip C binaries + +GO_FLAGS_EXTRA:= +ifeq "$(GOMODVENDOR)" "1" +GO_FLAGS_EXTRA += -mod=vendor +endif +GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) + +SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) + +# The link aliases for gcstools +GCS_TOOLS=\ + generichook + +.PHONY: all always rootfs test + +all: out/initrd.img out/rootfs.tar.gz + +clean: + find -name '*.o' -print0 | xargs -0 -r rm + rm -rf bin deps rootfs out + +test: + cd $(SRCROOT) && go test -v ./internal/guest/... + +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch + tar -zcf $@ -C rootfs . + rm -rf rootfs + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed + +-include deps/cmd/gcs.gomake +-include deps/cmd/gcstools.gomake + +# Implicit rule for includes that define Go targets. +%.gomake: $(SRCROOT)/Makefile + @mkdir -p $(dir $@) + @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new + @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new + @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new + @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new + @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new + @/bin/echo -e '\tmv $$@.new $$@' >> $@.new + @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new + mv $@.new $@ + +VPATH=$(SRCROOT) + +bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +bin/init: init/init.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +%.o: %.c + @mkdir -p $(dir $@) + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml new file mode 100644 index 000000000..ee18671aa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -0,0 +1,49 @@ +version = "unstable" +generator = "gogoctrd" +plugins = ["grpc", "fieldpath"] + +# Control protoc include paths. 
Below are usually some good defaults, but feel +# free to try it without them if it works for your project. +[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. + before = ["./protobuf"] + + # Paths that should be treated as include roots in relation to the vendor + # directory. These will be calculated with the vendor directory nearest the + # target package. + packages = ["github.com/gogo/protobuf"] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. + after = ["/usr/local/include"] + +# This section maps protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. +[packages] + "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" + "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" + "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1" + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] +plugins = ["ttrpc"] + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] +plugins = ["ttrpc"] + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"] +plugins = ["ttrpc"] + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"] +plugins = ["ttrpc"] \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 15b39181a..b8ca926a9 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -1,14 +1,68 @@ # hcsshim -[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master) +[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) -This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). +This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). 
It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers. -It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well. +It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well. + +## Building + +While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). +### Linux Hyper-V Container Guest Agent + +To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. +```powershell +C:\> $env:GOOS="linux" +C:\> go build .\cmd\gcs\ +``` + +or on a Linux machine +```sh +> go build ./cmd/gcs +``` + +If you want it to be packaged inside of a rootfs to boot with alongside all of the other tools then you'll need to provide a rootfs that it can be packaged inside of. An easy way is to export the rootfs of a container. + +```sh +docker pull busybox +docker run --name base_image_container busybox +docker export base_image_container | gzip > base.tar.gz +BASE=./base.tar.gz +make all +``` + +If the build is successful, in the `./out` folder you should see: +```sh +> ls ./out/ +delta.tar.gz initrd.img rootfs.tar.gz +``` + +### Containerd Shim +For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md. + +Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. + +```powershell +C:\> $env:GOOS="windows" +C:\> go build .\cmd\containerd-shim-runhcs-v1 +``` + +Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running: +```powershell +.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii +``` + +This config file will already have the shim set as the default runtime for cri interactions. + +To trial using the shim out with ctr.exe: +```powershell +C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!" +``` ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. @@ -16,6 +70,31 @@ When you submit a pull request, a CLA-bot will automatically determine whether y a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. 
+We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to +certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for +more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure +that all commits in a given PR are signed-off. + +### Test Directory (Important to note) + +This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this +project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has +its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file +has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project +(which is the repo itself on your disk). + +``` +replace ( + github.com/Microsoft/hcsshim => ../ +) +``` + +Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the +CI in this project will check if the files are out of date and will fail if this is true. + + +## Code of Conduct + This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/vendor/github.com/Microsoft/hcsshim/appveyor.yml deleted file mode 100644 index a8ec5a593..000000000 --- a/vendor/github.com/Microsoft/hcsshim/appveyor.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: 0.1.{build} - -image: Visual Studio 2017 - -clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim - -environment: - GOPATH: c:\gopath - PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH% - -stack: go 1.11 - -build_script: - - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip - - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL - - gometalinter.exe --config .gometalinter.json ./... - - go build ./cmd/wclayer - - go build ./cmd/runhcs - - go build ./cmd/tar2ext4 - - go test -v ./... 
-tags admin - - go test -c ./test/functional/ -tags functional - - go test -c ./test/runhcs/ -tags integration - -artifacts: - - path: 'wclayer.exe' - - path: 'runhcs.exe' - - path: 'tar2ext4.exe' - - path: 'functional.test.exe' - - path: 'runhcs.test.exe' \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go new file mode 100644 index 000000000..7f1f2823d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// AttachLayerStorageFilter sets up the layer storage filter on a writable +// container layer. +// +// `layerPath` is a path to a directory the writable layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.AttachLayerStorageFilter" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to attach layer storage filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go new file mode 100644 index 000000000..8e28e6c50 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DestroyLayer deletes a container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +func DestroyLayer(ctx context.Context, layerPath string) (err error) { + title := "hcsshim.DestroyLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) + + err = hcsDestroyLayer(layerPath) + if err != nil { + return errors.Wrap(err, "failed to destroy layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go new file mode 100644 index 000000000..435473257 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer. +// +// `layerPath` is a path to a directory containing the layer to export. 
+func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { + title := "hcsshim.DetachLayerStorageFilter" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) + + err = hcsDetachLayerStorageFilter(layerPath) + if err != nil { + return errors.Wrap(err, "failed to detach layer storage filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go new file mode 100644 index 000000000..a1b12dd12 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go @@ -0,0 +1,46 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ExportLayer exports a container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +// +// `exportFolderPath` is a pre-existing folder to export the layer to. +// +// `layerData` is the parent layer data. +// +// `options` are the export options applied to the exported layer. +func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { + title := "hcsshim.ExportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("exportFolderPath", exportFolderPath), + ) + + ldbytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + obytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) + if err != nil { + return errors.Wrap(err, "failed to export layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go new file mode 100644 index 000000000..83c0fa33f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. +// +// If the VHD is not mounted it will be temporarily mounted. 
+func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { + title := "hcsshim.FormatWritableLayerVhd" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + err = hcsFormatWritableLayerVhd(vhdHandle) + if err != nil { + return errors.Wrap(err, "failed to format writable layer vhd") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go new file mode 100644 index 000000000..87fee452c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go @@ -0,0 +1,193 @@ +package computestorage + +import ( + "context" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio/pkg/security" + "github.com/Microsoft/go-winio/vhd" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const defaultVHDXBlockSizeInMB = 1 + +// SetupContainerBaseLayer is a helper to setup a containers scratch. It +// will create and format the vhdx's inside and the size is configurable with the sizeInGB +// parameter. +// +// `layerPath` is the path to the base container layer on disk. +// +// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. +// +// `diffVhdPath` is the path where the differencing disk for the base layer should be created. +// +// `sizeInGB` is the size in gigabytes to make the base vhdx. +func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + var ( + hivesPath = filepath.Join(layerPath, "Hives") + layoutPath = filepath.Join(layerPath, "Layout") + ) + + // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files + // already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and + // differencing disks if they exist in case we're asking for a different size. 
+ if _, err := os.Stat(hivesPath); err == nil { + if err := os.RemoveAll(hivesPath); err != nil { + return errors.Wrap(err, "failed to remove prexisting hives directory") + } + } + if _, err := os.Stat(layoutPath); err == nil { + if err := os.RemoveAll(layoutPath); err != nil { + return errors.Wrap(err, "failed to remove prexisting layout file") + } + } + + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx path") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { + return err + } + // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + options := OsLayerOptions{ + Type: OsLayerTypeContainer, + } + + // SetupBaseOSLayer expects an empty vhd handle for a container layer and will + // error out otherwise. + if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { + return err + } + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} + +// SetupUtilityVMBaseLayer is a helper to setup a UVMs scratch space. It will create and format +// the vhdx inside and the size is configurable by the sizeInGB parameter. +// +// `uvmPath` is the path to the UtilityVM filesystem. +// +// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. +// +// `diffVhdPath` is the path where the differencing disk for the UVM should be created. +// +// `sizeInGB` specifies the size in gigabytes to make the base vhdx. +func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + // Remove the base and differencing disks if they exist in case we're asking for a different size. + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + // Just create the vhdx for utilityVM layer, no need to format it. 
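+	// sizeInGB is converted to bytes below, and the defaultVHDXBlockSizeInMB
+	// constant (1) is likewise converted from megabytes to bytes for the VHDX
+	// block size.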
+ createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + // If it is a UtilityVM layer then the base vhdx must be attached when calling + // `SetupBaseOSLayer` + attachParams := &vhd.AttachVirtualDiskParameters{ + Version: 2, + } + if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { + return errors.Wrapf(err, "failed to attach virtual disk") + } + + options := OsLayerOptions{ + Type: OsLayerTypeVM, + } + if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { + return err + } + + // Detach and close the handle after setting up the layer as we don't need the handle + // for anything else and we no longer need to be attached either. + if err = vhd.DetachVirtualDisk(handle); err != nil { + return errors.Wrap(err, "failed to detach vhdx") + } + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go new file mode 100644 index 000000000..0c61dab32 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go @@ -0,0 +1,41 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ImportLayer imports a container layer. +// +// `layerPath` is a path to a directory to import the layer to. If the directory +// does not exist it will be automatically created. +// +// `sourceFolderpath` is a pre-existing folder that contains the layer to +// import. +// +// `layerData` is the parent layer data. 
+func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { + title := "hcsshim.ImportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("sourceFolderPath", sourceFolderPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to import layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go new file mode 100644 index 000000000..53ed8ea6e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// InitializeWritableLayer initializes a writable layer for a container. +// +// `layerPath` is a path to a directory the layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.InitializeWritableLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + // Options are not used in the platform as of RS5 + err = hcsInitializeWritableLayer(layerPath, string(bytes), "") + if err != nil { + return errors.Wrap(err, "failed to intitialize container layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go new file mode 100644 index 000000000..fcdbbef81 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go @@ -0,0 +1,27 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. 
+func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { + title := "hcsshim.GetLayerVhdMountPath" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var mountPath *uint16 + err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) + if err != nil { + return "", errors.Wrap(err, "failed to get vhd mount path") + } + path = interop.ConvertAndFreeCoTaskMemString(mountPath) + return path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go new file mode 100644 index 000000000..06aaf841e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go @@ -0,0 +1,74 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// SetupBaseOSLayer sets up a layer that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` +// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type +// == OsLayerTypeVm`. +// +// `options` are the options applied while processing the layer. +func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { + title := "hcsshim.SetupBaseOSLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} + +// SetupBaseOSVolume sets up a volume that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `volumePath` is the path to the volume to be used for setup. +// +// `options` are the options applied while processing the layer. +func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { + if osversion.Build() < 19645 { + return errors.New("SetupBaseOSVolume is not present on builds older than 19645") + } + title := "hcsshim.SetupBaseOSVolume" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("volumePath", volumePath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go new file mode 100644 index 000000000..95aff9c18 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -0,0 +1,50 @@ +// Package computestorage is a wrapper around the HCS storage APIs. 
These are new storage APIs introduced +// separate from the original graphdriver calls intended to give more freedom around creating +// and managing container layers and scratch spaces. +package computestorage + +import ( + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go + +//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? +//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? +//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? +//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? +//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? +//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? +//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? +//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? +//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? +//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? + +// LayerData is the data used to describe parent layer information. +type LayerData struct { + SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` +} + +// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. +type ExportLayerOptions struct { + IsWritableLayer bool `json:"IsWritableLayer,omitempty"` +} + +// OsLayerType is the type of layer being operated on. +type OsLayerType string + +const ( + // OsLayerTypeContainer is a container layer. + OsLayerTypeContainer OsLayerType = "Container" + // OsLayerTypeVM is a virtual machine layer. + OsLayerTypeVM OsLayerType = "Vm" +) + +// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and +// `SetupBaseOSVolume` calls. +type OsLayerOptions struct { + Type OsLayerType `json:"Type,omitempty"` + DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` + SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go new file mode 100644 index 000000000..4f9518067 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -0,0 +1,319 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package computestorage + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
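The //sys declarations in storage.go above hand LayerData and OsLayerOptions to computestorage.dll as JSON strings. A small illustrative sketch (not part of the patch) that prints the serialized forms, which can be handy when debugging what is passed across the syscall boundary:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	// OsLayerOptions becomes the `options` string passed to
	// HcsSetupBaseOSLayer / HcsSetupBaseOSVolume.
	opts, err := json.Marshal(computestorage.OsLayerOptions{Type: computestorage.OsLayerTypeVM})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(opts)) // {"Type":"Vm"}

	// LayerData becomes the `layerData` string for the import/export/attach calls.
	layers, err := json.Marshal(computestorage.LayerData{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(layers)) // parent layer description, empty here
}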
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") + + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") + procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") + procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") +) + +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) +} + +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p3 *uint16 + _p3, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsExportLayer(_p0, _p1, _p2, _p3) +} + +func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDestroyLayer(_p0) +} + +func _hcsDestroyLayer(layerPath *uint16) (hr error) { + if hr = procHcsDestoryLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) 
+ if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSLayer(_p0, handle, _p1) +} + +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsInitializeWritableLayer(_p0, _p1, _p2) +} + +func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsInitializeWritableLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSVolume(_p0, _p1, _p2) +} + +func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go index e142c3154..bfd722898 100644 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -1,13 +1,15 @@ package hcsshim import ( + "context" "fmt" "os" + "sync" "time" "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" "github.com/Microsoft/hcsshim/internal/mergemaps" - "github.com/Microsoft/hcsshim/internal/schema1" ) // ContainerProperties holds the properties for a container and the processes running in that container @@ -52,7 +54,10 @@ const ( type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse type container struct { - system *hcs.System + system *hcs.System + waitOnce sync.Once + waitErr error + waitCh chan struct{} } // createComputeSystemAdditionalJSON is read from the environment at initialisation @@ -71,61 +76,87 @@ func CreateContainer(id string, c *ContainerConfig) (Container, error) { return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) } - system, err := hcs.CreateComputeSystem(id, fullConfig) + system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) if err != nil { return nil, err } - return &container{system}, err + return &container{system: system}, err } // OpenContainer opens an existing container by ID. func OpenContainer(id string) (Container, error) { - system, err := hcs.OpenComputeSystem(id) + system, err := hcs.OpenComputeSystem(context.Background(), id) if err != nil { return nil, err } - return &container{system}, err + return &container{system: system}, err } // GetContainers gets a list of the containers on the system that match the query func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { - return hcs.GetComputeSystems(q) + return hcs.GetComputeSystems(context.Background(), q) } // Start synchronously starts the container. 
func (container *container) Start() error { - return convertSystemError(container.system.Start(), container) + return convertSystemError(container.system.Start(context.Background()), container) } // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. func (container *container) Shutdown() error { - return convertSystemError(container.system.Shutdown(), container) + err := container.system.Shutdown(context.Background()) + if err != nil { + return convertSystemError(err, container) + } + return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"} } // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. func (container *container) Terminate() error { - return convertSystemError(container.system.Terminate(), container) + err := container.system.Terminate(context.Background()) + if err != nil { + return convertSystemError(err, container) + } + return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"} } // Waits synchronously waits for the container to shutdown or terminate. func (container *container) Wait() error { - return convertSystemError(container.system.Wait(), container) + err := container.system.Wait() + if err == nil { + err = container.system.ExitError() + } + return convertSystemError(err, container) } // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It // returns false if timeout occurs. -func (container *container) WaitTimeout(t time.Duration) error { - return convertSystemError(container.system.WaitTimeout(t), container) +func (container *container) WaitTimeout(timeout time.Duration) error { + container.waitOnce.Do(func() { + container.waitCh = make(chan struct{}) + go func() { + container.waitErr = container.Wait() + close(container.waitCh) + }() + }) + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-t.C: + return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"} + case <-container.waitCh: + return container.waitErr + } } // Pause pauses the execution of a container. func (container *container) Pause() error { - return convertSystemError(container.system.Pause(), container) + return convertSystemError(container.system.Pause(context.Background()), container) } // Resume resumes the execution of a container. func (container *container) Resume() error { - return convertSystemError(container.system.Resume(), container) + return convertSystemError(container.system.Resume(context.Background()), container) } // HasPendingUpdates returns true if the container has updates pending to install @@ -135,7 +166,7 @@ func (container *container) HasPendingUpdates() (bool, error) { // Statistics returns statistics for the container. This is a legacy v1 call func (container *container) Statistics() (Statistics, error) { - properties, err := container.system.Properties(schema1.PropertyTypeStatistics) + properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics) if err != nil { return Statistics{}, convertSystemError(err, container) } @@ -145,7 +176,7 @@ func (container *container) Statistics() (Statistics, error) { // ProcessList returns an array of ProcessListItems for the container. 
This is a legacy v1 call func (container *container) ProcessList() ([]ProcessListItem, error) { - properties, err := container.system.Properties(schema1.PropertyTypeProcessList) + properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList) if err != nil { return nil, convertSystemError(err, container) } @@ -155,7 +186,7 @@ func (container *container) ProcessList() ([]ProcessListItem, error) { // This is a legacy v1 call func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) { - properties, err := container.system.Properties(schema1.PropertyTypeMappedVirtualDisk) + properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk) if err != nil { return nil, convertSystemError(err, container) } @@ -165,20 +196,20 @@ func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskContr // CreateProcess launches a new process within the container. func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { - p, err := container.system.CreateProcess(c) + p, err := container.system.CreateProcess(context.Background(), c) if err != nil { return nil, convertSystemError(err, container) } - return &process{p}, nil + return &process{p: p.(*hcs.Process)}, nil } // OpenProcess gets an interface to an existing process within the container. func (container *container) OpenProcess(pid int) (Process, error) { - p, err := container.system.OpenProcess(pid) + p, err := container.system.OpenProcess(context.Background(), pid) if err != nil { return nil, convertSystemError(err, container) } - return &process{p}, nil + return &process{p: p}, nil } // Close cleans up any state associated with the container but does not terminate or wait for it. @@ -188,5 +219,5 @@ func (container *container) Close() error { // Modify the System func (container *container) Modify(config *ResourceModificationRequestResponse) error { - return convertSystemError(container.system.Modify(config), container) + return convertSystemError(container.system.Modify(context.Background(), config), container) } diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go index 63efa23c7..f367022e7 100644 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -59,7 +59,7 @@ var ( // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState - // ErrProcNotFound is an error encountered when the the process cannot be found + // ErrProcNotFound is an error encountered when a procedure look up fails. 
ErrProcNotFound = hcs.ErrProcNotFound // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 @@ -83,7 +83,6 @@ type NetworkNotFoundError = hns.NetworkNotFoundError type ProcessError struct { Process *process Operation string - ExtraInfo string Err error Events []hcs.ErrorEvent } @@ -92,7 +91,6 @@ type ProcessError struct { type ContainerError struct { Container *container Operation string - ExtraInfo string Err error Events []hcs.ErrorEvent } @@ -125,22 +123,9 @@ func (e *ContainerError) Error() string { s += "\n" + ev.String() } - if e.ExtraInfo != "" { - s += " extra info: " + e.ExtraInfo - } - return s } -func makeContainerError(container *container, operation string, extraInfo string, err error) error { - // Don't double wrap errors - if _, ok := err.(*ContainerError); ok { - return err - } - containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err} - return containerError -} - func (e *ProcessError) Error() string { if e == nil { return "" @@ -171,19 +156,10 @@ func (e *ProcessError) Error() string { return s } -func makeProcessError(process *process, operation string, extraInfo string, err error) error { - // Don't double wrap errors - if _, ok := err.(*ProcessError); ok { - return err - } - processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err} - return processError -} - // IsNotExist checks if an error is caused by the Container or Process not existing. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { if _, ok := err.(EndpointNotFoundError); ok { return true @@ -216,7 +192,7 @@ func IsTimeout(err error) bool { // a Container or Process being already stopped. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. func IsAlreadyStopped(err error) bool { return hcs.IsAlreadyStopped(getInnerError(err)) } @@ -230,6 +206,18 @@ func IsNotSupported(err error) bool { return hcs.IsNotSupported(getInnerError(err)) } +// IsOperationInvalidState returns true when err is caused by +// `ErrVmcomputeOperationInvalidState`. +func IsOperationInvalidState(err error) bool { + return hcs.IsOperationInvalidState(getInnerError(err)) +} + +// IsAccessIsDenied returns true when err is caused by +// `ErrVmcomputeOperationAccessIsDenied`. 
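With the container.go changes above, Shutdown and Terminate report ErrVmcomputeOperationPending when the request is accepted, and errors.go now exposes IsOperationInvalidState and IsAccessIsDenied alongside the existing classifiers. The sketch below is illustrative rather than part of the patch; it assumes hcsshim.IsPending is the package's existing helper for the pending error.

package main

import (
	"time"

	"github.com/Microsoft/hcsshim"
)

// stopContainer requests shutdown and waits, classifying errors with the
// helpers from errors.go.
func stopContainer(c hcsshim.Container, timeout time.Duration) error {
	if err := c.Shutdown(); err != nil {
		switch {
		case hcsshim.IsPending(err):
			// Accepted; the shutdown completes in the background.
		case hcsshim.IsAlreadyStopped(err), hcsshim.IsNotExist(err):
			return nil
		case hcsshim.IsOperationInvalidState(err), hcsshim.IsAccessIsDenied(err):
			// The classifiers added in this patch; surfaced unchanged here.
			return err
		default:
			return err
		}
	}

	err := c.WaitTimeout(timeout)
	if hcsshim.IsTimeout(err) {
		// Still running after the deadline; a caller could escalate to Terminate.
	}
	return err
}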
+func IsAccessIsDenied(err error) bool { + return hcs.IsAccessIsDenied(getInnerError(err)) +} + func getInnerError(err error) error { switch pe := err.(type) { case nil: @@ -244,7 +232,7 @@ func getInnerError(err error) error { func convertSystemError(err error, c *container) error { if serr, ok := err.(*hcs.SystemError); ok { - return &ContainerError{Container: c, Operation: serr.Op, ExtraInfo: serr.Extra, Err: serr.Err, Events: serr.Events} + return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} } return err } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go index 8bae5fc0e..df3a59a78 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go @@ -7,7 +7,7 @@ import ( "fmt" "syscall" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" ) //go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go @@ -55,11 +55,14 @@ import ( //sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? //sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? -// Service -//sys hcnOpenService(service *hcnService, result **uint16) (hr error) = computenetwork.HcnOpenService? -//sys hcnRegisterServiceCallback(service hcnService, callback int32, context int32, callbackHandle *hcnCallbackHandle) (hr error) = computenetwork.HcnRegisterServiceCallback? -//sys hcnUnregisterServiceCallback(callbackHandle hcnCallbackHandle) (hr error) = computenetwork.HcnUnregisterServiceCallback? -//sys hcnCloseService(service hcnService) (hr error) = computenetwork.HcnCloseService? +// SDN Routes +//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes? +//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute? +//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute? +//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute? +//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties? +//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute? +//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute? type _guid = guid.GUID @@ -67,8 +70,7 @@ type hcnNetwork syscall.Handle type hcnEndpoint syscall.Handle type hcnNamespace syscall.Handle type hcnLoadBalancer syscall.Handle -type hcnService syscall.Handle -type hcnCallbackHandle syscall.Handle +type hcnRoute syscall.Handle // SchemaVersion for HCN Objects/Queries. type SchemaVersion = Version // hcnglobals.go @@ -91,6 +93,20 @@ type HostComputeQuery struct { Filter string `json:",omitempty"` } +type ExtraParams struct { + Resources json.RawMessage `json:",omitempty"` + SharedContainers json.RawMessage `json:",omitempty"` + LayeredOn string `json:",omitempty"` + SwitchGuid string `json:",omitempty"` + UtilityVM string `json:",omitempty"` + VirtualMachine string `json:",omitempty"` +} + +type Health struct { + Data interface{} `json:",omitempty"` + Extra ExtraParams `json:",omitempty"` +} + // defaultQuery generates HCN Query. // Passed into get/enumerate calls to filter results. 
func defaultQuery() HostComputeQuery { @@ -104,23 +120,17 @@ func defaultQuery() HostComputeQuery { return query } -func defaultQueryJson() string { - query := defaultQuery() - queryJson, err := json.Marshal(query) - if err != nil { - return "" - } - return string(queryJson) -} - // PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS func platformDoesNotSupportError(featureName string) error { - return fmt.Errorf("Platform does not support feature %s", featureName) + return fmt.Errorf("platform does not support feature %s", featureName) } // V2ApiSupported returns an error if the HCN version does not support the V2 Apis. func V2ApiSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.Api.V2 { return nil } @@ -136,7 +146,10 @@ func V2SchemaVersion() SchemaVersion { // RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. func RemoteSubnetSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.RemoteSubnet { return nil } @@ -145,7 +158,10 @@ func RemoteSubnetSupported() error { // HostRouteSupported returns an error if the HCN version does not support Host Route policies. func HostRouteSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.HostRoute { return nil } @@ -154,13 +170,148 @@ func HostRouteSupported() error { // DSRSupported returns an error if the HCN version does not support Direct Server Return. func DSRSupported() error { - supported := GetSupportedFeatures() + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } if supported.DSR { return nil } return platformDoesNotSupportError("Direct Server Return (DSR)") } +// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. +func Slash32EndpointPrefixesSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.Slash32EndpointPrefixes { + return nil + } + return platformDoesNotSupportError("Slash 32 Endpoint prefixes") +} + +// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. +func AclSupportForProtocol252Supported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.AclSupportForProtocol252 { + return nil + } + return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN") +} + +// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. +func SessionAffinitySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.SessionAffinity { + return nil + } + return platformDoesNotSupportError("Session Affinity") +} + +// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. 
+func IPv6DualStackSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.IPv6DualStack { + return nil + } + return platformDoesNotSupportError("IPv6 DualStack") +} + +//L4proxySupported returns an error if the HCN verison does not support L4Proxy +func L4proxyPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.L4Proxy { + return nil + } + return platformDoesNotSupportError("L4ProxyPolicy") +} + +// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy +func L4WfpProxyPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.L4WfpProxy { + return nil + } + return platformDoesNotSupportError("L4WfpProxyPolicy") +} + +// SetPolicySupported returns an error if the HCN version does not support SetPolicy. +func SetPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.SetPolicy { + return nil + } + return platformDoesNotSupportError("SetPolicy") +} + +// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. +func VxlanPortSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.VxlanPort { + return nil + } + return platformDoesNotSupportError("VXLAN port configuration") +} + +// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl. +func TierAclPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.TierAcl { + return nil + } + return platformDoesNotSupportError("TierAcl") +} + +// NetworkACLPolicySupported returns an error if the HCN version does not support NetworkACLPolicy +func NetworkACLPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.NetworkACL { + return nil + } + return platformDoesNotSupportError("NetworkACL") +} + +// NestedIpSetSupported returns an error if the HCN version does not support NestedIpSet +func NestedIpSetSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.NestedIpSet { + return nil + } + return platformDoesNotSupportError("NestedIpSet") +} + // RequestType are the different operations performed to settings. // Used to update the settings of Endpoint/Namespace objects. 
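Each of the *Supported helpers above consults GetCachedSupportedFeatures before a feature is used. Below is an illustrative sketch (not part of the vendored code) of gating load balancer flags on those checks; combining DSR and IPv6 this way is caller policy, not an HCN requirement.

package main

import (
	"github.com/Microsoft/hcsshim/hcn"
)

// loadBalancerFlags picks LoadBalancerFlags based on what the running HNS
// supports, using the feature gates defined above.
func loadBalancerFlags(wantDSR, wantIPv6 bool) (hcn.LoadBalancerFlags, error) {
	flags := hcn.LoadBalancerFlagsNone
	if wantDSR {
		if err := hcn.DSRSupported(); err != nil {
			return flags, err
		}
		flags |= hcn.LoadBalancerFlagsDSR
	}
	if wantIPv6 {
		if err := hcn.IPv6DualStackSupported(); err != nil {
			return flags, err
		}
		flags |= hcn.LoadBalancerFlagsIPv6
	}
	return flags, nil
}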
type RequestType string diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go index d58335e5c..545e8639d 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go @@ -2,8 +2,9 @@ package hcn import ( "encoding/json" + "errors" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" ) @@ -36,6 +37,7 @@ type HostComputeEndpoint struct { Routes []Route `json:",omitempty"` MacAddress string `json:",omitempty"` Flags EndpointFlags `json:",omitempty"` + Health Health `json:",omitempty"` SchemaVersion SchemaVersion `json:",omitempty"` } @@ -57,6 +59,13 @@ type ModifyEndpointSettingRequest struct { Settings json.RawMessage `json:",omitempty"` } +// VmEndpointRequest creates a switch port with identifier `PortId`. +type VmEndpointRequest struct { + PortId guid.GUID `json:",omitempty"` + VirtualNicName string `json:",omitempty"` + VirtualMachineId guid.GUID `json:",omitempty"` +} + type PolicyEndpointRequest struct { Policies []EndpointPolicy `json:",omitempty"` } @@ -121,7 +130,10 @@ func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { } func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { - networkGuid := guid.FromString(networkId) + networkGuid, err := guid.FromString(networkId) + if err != nil { + return nil, errInvalidNetworkID + } // Open network. var networkHandle hcnNetwork var resultBuffer *uint16 @@ -167,7 +179,10 @@ func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndp } func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { - endpointGuid := guid.FromString(endpointId) + endpointGuid, err := guid.FromString(endpointId) + if err != nil { + return nil, errInvalidEndpointID + } // Open endpoint var ( endpointHandle hcnEndpoint @@ -208,7 +223,10 @@ func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, e } func deleteEndpoint(endpointId string) error { - endpointGuid := guid.FromString(endpointId) + endpointGuid, err := guid.FromString(endpointId) + if err != nil { + return errInvalidEndpointID + } var resultBuffer *uint16 hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { @@ -299,6 +317,10 @@ func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) + if endpoint.HostComputeNamespace != "" { + return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") + } + jsonString, err := json.Marshal(endpoint) if err != nil { return nil, err @@ -339,7 +361,7 @@ func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingReq } // ApplyPolicy applies a Policy (ex: ACL) on the Endpoint. 
-func (endpoint *HostComputeEndpoint) ApplyPolicy(endpointPolicy PolicyEndpointRequest) error { +func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) settingsJson, err := json.Marshal(endpointPolicy) @@ -348,7 +370,7 @@ func (endpoint *HostComputeEndpoint) ApplyPolicy(endpointPolicy PolicyEndpointRe } requestMessage := &ModifyEndpointSettingRequest{ ResourceType: EndpointResourceTypePolicy, - RequestType: RequestTypeUpdate, + RequestType: requestType, Settings: settingsJson, } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go index 6d46bf8bb..ad30d320d 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go @@ -3,13 +3,23 @@ package hcn import ( + "errors" "fmt" + "github.com/Microsoft/hcsshim/internal/hcs" "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" ) +var ( + errInvalidNetworkID = errors.New("invalid network ID") + errInvalidEndpointID = errors.New("invalid endpoint ID") + errInvalidNamespaceID = errors.New("invalid namespace ID") + errInvalidLoadBalancerID = errors.New("invalid load balancer ID") + errInvalidRouteID = errors.New("invalid route ID") +) + func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { errorFound := false @@ -26,7 +36,7 @@ func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { } if errorFound { - returnError := hcserror.New(hr, methodName, result) + returnError := new(hr, methodName, result) logrus.Debugf(returnError.Error()) // HCN errors logged for debugging. 
return returnError } @@ -34,6 +44,52 @@ func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { return nil } +type ErrorCode uint32 + +// For common errors, define the error as it is in windows, so we can quickly determine it later +const ( + ERROR_NOT_FOUND = 0x490 + HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 +) + +type HcnError struct { + *hcserror.HcsError + code ErrorCode +} + +func (e *HcnError) Error() string { + return e.HcsError.Error() +} + +func CheckErrorWithCode(err error, code ErrorCode) bool { + hcnError, ok := err.(*HcnError) + if ok { + return hcnError.code == code + } + return false +} + +func IsElementNotFoundError(err error) bool { + return CheckErrorWithCode(err, ERROR_NOT_FOUND) +} + +func IsPortAlreadyExistsError(err error) bool { + return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS) +} + +func new(hr error, title string, rest string) error { + err := &HcnError{} + hcsError := hcserror.New(hr, title, rest) + err.HcsError = hcsError.(*hcserror.HcsError) + err.code = ErrorCode(hcserror.Win32FromError(hr)) + return err +} + +// +// Note that the below errors are not errors returned by hcn itself +// we wish to seperate them as they are shim usage error +// + // NetworkNotFoundError results from a failed seach for a network by Id or Name type NetworkNotFoundError struct { NetworkName string @@ -41,10 +97,10 @@ type NetworkNotFoundError struct { } func (e NetworkNotFoundError) Error() string { - if e.NetworkName == "" { - return fmt.Sprintf("Network Name %s not found", e.NetworkName) + if e.NetworkName != "" { + return fmt.Sprintf("Network name %q not found", e.NetworkName) } - return fmt.Sprintf("Network Id %s not found", e.NetworkID) + return fmt.Sprintf("Network ID %q not found", e.NetworkID) } // EndpointNotFoundError results from a failed seach for an endpoint by Id or Name @@ -54,10 +110,10 @@ type EndpointNotFoundError struct { } func (e EndpointNotFoundError) Error() string { - if e.EndpointName == "" { - return fmt.Sprintf("Endpoint Name %s not found", e.EndpointName) + if e.EndpointName != "" { + return fmt.Sprintf("Endpoint name %q not found", e.EndpointName) } - return fmt.Sprintf("Endpoint Id %s not found", e.EndpointID) + return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) } // NamespaceNotFoundError results from a failed seach for a namsepace by Id @@ -66,7 +122,7 @@ type NamespaceNotFoundError struct { } func (e NamespaceNotFoundError) Error() string { - return fmt.Sprintf("Namespace %s not found", e.NamespaceID) + return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID) } // LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id @@ -75,13 +131,22 @@ type LoadBalancerNotFoundError struct { } func (e LoadBalancerNotFoundError) Error() string { - return fmt.Sprintf("LoadBalancer %s not found", e.LoadBalancerId) + return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) +} + +// RouteNotFoundError results from a failed seach for a route by Id +type RouteNotFoundError struct { + RouteId string +} + +func (e RouteNotFoundError) Error() string { + return fmt.Sprintf("SDN Route %q not found", e.RouteId) } // IsNotFoundError returns a boolean indicating whether the error was caused by // a resource not being found. 
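ApplyPolicy above now takes an explicit RequestType, and hcnerrors.go adds typed checks such as IsElementNotFoundError. The sketch below is illustrative only: the hcn.ACL constant and the AclPolicySetting fields used here come from parts of hcnpolicy.go not shown in this hunk and are assumptions.

package main

import (
	"encoding/json"

	"github.com/Microsoft/hcsshim/hcn"
)

// blockInbound80 applies an ACL policy to an endpoint using the new
// RequestType parameter on ApplyPolicy.
func blockInbound80(endpoint *hcn.HostComputeEndpoint) error {
	acl := hcn.AclPolicySetting{
		Protocols:  "6", // TCP
		Action:     hcn.ActionTypeBlock,
		Direction:  hcn.DirectionTypeIn,
		LocalPorts: "80",
		Priority:   200,
	}
	settings, err := json.Marshal(acl)
	if err != nil {
		return err
	}

	request := hcn.PolicyEndpointRequest{
		Policies: []hcn.EndpointPolicy{{
			Type:     hcn.ACL,
			Settings: settings,
		}},
	}

	err = endpoint.ApplyPolicy(hcn.RequestTypeUpdate, request)
	if hcn.IsElementNotFoundError(err) {
		// The endpoint disappeared underneath us; surface it as not-found.
		return hcn.EndpointNotFoundError{EndpointID: endpoint.Id}
	}
	return err
}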
func IsNotFoundError(err error) bool { - switch err.(type) { + switch pe := err.(type) { case NetworkNotFoundError: return true case EndpointNotFoundError: @@ -90,6 +155,10 @@ func IsNotFoundError(err error) bool { return true case LoadBalancerNotFoundError: return true + case RouteNotFoundError: + return true + case *hcserror.HcsError: + return pe.Err == hcs.ErrElementNotFound } return false } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go index 29d13deac..14903bc5e 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go @@ -3,6 +3,7 @@ package hcn import ( "encoding/json" "fmt" + "math" "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/interop" @@ -20,17 +21,67 @@ type Version struct { Minor int `json:"Minor"` } +type VersionRange struct { + MinVersion Version + MaxVersion Version +} + +type VersionRanges []VersionRange + var ( // HNSVersion1803 added ACL functionality. - HNSVersion1803 = Version{Major: 7, Minor: 2} + HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} // V2ApiSupport allows the use of V2 Api calls and V2 Schema. - V2ApiSupport = Version{Major: 9, Minor: 1} + V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} // Remote Subnet allows for Remote Subnet policies on Overlay networks - RemoteSubnetVersion = Version{Major: 9, Minor: 2} + RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} // A Host Route policy allows for local container to local host communication Overlay networks - HostRouteVersion = Version{Major: 9, Minor: 2} - // HNS 10.2 allows for Direct Server Return for loadbalancing - DSRVersion = Version{Major: 10, Minor: 2} + HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing + DSRVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 9.3 through 10.0 (not included) and, 10.4+ provide support for configuring endpoints with /32 prefixes + Slash32EndpointPrefixesVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 9.3 through 10.0 (not included) and, 10.4+ allow for HNS ACL Policies to support protocol 252 for VXLAN + AclSupportForProtocol252Version = VersionRanges{ + VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 12.0 allows for session affinity for loadbalancing + SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 11.10+ supports Ipv6 dual stack. 
+ IPv6DualStackVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 13.0 allows for Set Policy support + SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 10.3 allows for VXLAN ports + VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 9.5 through 10.0(not included), 10.5 through 11.0(not included), 11.11 through 12.0(not included), 12.1 through 13.0(not included), 13.1+ allows for Network L4Proxy Policy support + L4ProxyPolicyVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + + //HNS 13.2 allows for L4WfpProxy Policy support + L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 14.0 allows for TierAcl Policy support + TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 15.0 allows for NetworkACL Policy support + NetworkACLPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 15.0 allows for NestedIpSet support + NestedIpSetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} ) // GetGlobals returns the global properties of the HCN Service. 
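The feature constants above changed from single minimum Versions to VersionRanges, so a feature can be supported in disjoint HNS version windows. The helper below is an editorial illustration of those semantics, not the implementation hcnsupport.go actually uses.

package main

import (
	"github.com/Microsoft/hcsshim/hcn"
)

// inRanges reports whether version v falls inside any of the given ranges.
func inRanges(v hcn.Version, ranges hcn.VersionRanges) bool {
	for _, r := range ranges {
		if atLeast(v, r.MinVersion) && atMost(v, r.MaxVersion) {
			return true
		}
	}
	return false
}

func atLeast(v, min hcn.Version) bool {
	return v.Major > min.Major || (v.Major == min.Major && v.Minor >= min.Minor)
}

func atMost(v, max hcn.Version) bool {
	return v.Major < max.Major || (v.Major == max.Major && v.Minor <= max.Minor)
}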
@@ -60,7 +111,7 @@ func hnsCall(method, path, request string, returnResponse interface{}) error { err := _hnsCall(method, path, request, &responseBuffer) if err != nil { - return hcserror.New(err, "hnsCall ", "") + return hcserror.New(err, "hnsCall", "") } response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go index cff68e135..1b434b07b 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go @@ -3,17 +3,18 @@ package hcn import ( "encoding/json" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" ) // LoadBalancerPortMapping is associated with HostComputeLoadBalancer type LoadBalancerPortMapping struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - Flags LoadBalancerPortMappingFlags `json:",omitempty"` + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + DistributionType LoadBalancerDistribution `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2 + Flags LoadBalancerPortMappingFlags `json:",omitempty"` } // HostComputeLoadBalancer represents software load balancer. @@ -35,6 +36,7 @@ var ( LoadBalancerFlagsNone LoadBalancerFlags = 0 // LoadBalancerFlagsDSR enables Direct Server Return (DSR) LoadBalancerFlagsDSR LoadBalancerFlags = 1 + LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 ) // LoadBalancerPortMappingFlags are special settings on a loadbalancer. @@ -53,6 +55,18 @@ var ( LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 ) +// LoadBalancerDistribution specifies how the loadbalancer distributes traffic. +type LoadBalancerDistribution uint32 + +var ( + // LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod. + LoadBalancerDistributionNone LoadBalancerDistribution + // LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod. + LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1 + // LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod. + LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 +) + func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { // Open loadBalancer. var ( @@ -147,49 +161,11 @@ func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { return &outputLoadBalancer, nil } -func modifyLoadBalancer(loadBalancerId string, settings string) (*HostComputeLoadBalancer, error) { - loadBalancerGuid := guid.FromString(loadBalancerId) - // Open loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Modify loadBalancer. 
- hr = hcnModifyLoadBalancer(loadBalancerHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) +func deleteLoadBalancer(loadBalancerId string) error { + loadBalancerGuid, err := guid.FromString(loadBalancerId) if err != nil { - return nil, err - } - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err + return errInvalidLoadBalancerID } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. - hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to LoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func deleteLoadBalancer(loadBalancerId string) error { - loadBalancerGuid := guid.FromString(loadBalancerId) var resultBuffer *uint16 hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go index 6dbef4f25..d2ef22960 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go @@ -5,8 +5,8 @@ import ( "os" "syscall" + "github.com/Microsoft/go-winio/pkg/guid" icni "github.com/Microsoft/hcsshim/internal/cni" - "github.com/Microsoft/hcsshim/internal/guid" "github.com/Microsoft/hcsshim/internal/interop" "github.com/Microsoft/hcsshim/internal/regstate" "github.com/Microsoft/hcsshim/internal/runhcs" @@ -165,7 +165,10 @@ func createNamespace(settings string) (*HostComputeNamespace, error) { } func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { - namespaceGuid := guid.FromString(namespaceId) + namespaceGuid, err := guid.FromString(namespaceId) + if err != nil { + return nil, errInvalidNamespaceID + } // Open namespace. var ( namespaceHandle hcnNamespace @@ -206,7 +209,10 @@ func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace } func deleteNamespace(namespaceId string) error { - namespaceGuid := guid.FromString(namespaceId) + namespaceGuid, err := guid.FromString(namespaceId) + if err != nil { + return errInvalidNamespaceID + } var resultBuffer *uint16 hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { @@ -241,7 +247,23 @@ func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) // GetNamespaceByID returns the Namespace specified by Id. 
func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { - return getNamespace(guid.FromString(namespaceId), defaultQueryJson()) + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": namespaceId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + namespaces, err := ListNamespacesQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(namespaces) == 0 { + return nil, NamespaceNotFoundError{NamespaceID: namespaceId} + } + + return &namespaces[0], err } // GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. @@ -356,7 +378,7 @@ func (namespace *HostComputeNamespace) Sync() error { // The shim is likey gone. Simply ignore the sync as if it didn't exist. if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { // Remove the reg key there is no point to try again - cfg.Remove() + _ = cfg.Remove() return nil } f := map[string]interface{}{ diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go index b5f1db8b2..c36b13638 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go @@ -2,27 +2,28 @@ package hcn import ( "encoding/json" + "errors" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" ) -// Route is assoicated with a subnet. +// Route is associated with a subnet. type Route struct { NextHop string `json:",omitempty"` DestinationPrefix string `json:",omitempty"` Metric uint16 `json:",omitempty"` } -// Subnet is assoicated with a Ipam. +// Subnet is associated with a Ipam. type Subnet struct { IpAddressPrefix string `json:",omitempty"` Policies []json.RawMessage `json:",omitempty"` Routes []Route `json:",omitempty"` } -// Ipam (Internet Protocol Addres Management) is assoicated with a network +// Ipam (Internet Protocol Address Management) is associated with a network // and represents the address space(s) of a network. type Ipam struct { Type string `json:",omitempty"` // Ex: Static, DHCP @@ -35,12 +36,12 @@ type MacRange struct { EndMacAddress string `json:",omitempty"` } -// MacPool is assoicated with a network and represents pool of MacRanges. +// MacPool is associated with a network and represents pool of MacRanges. type MacPool struct { Ranges []MacRange `json:",omitempty"` } -// Dns (Domain Name System is associated with a network. +// Dns (Domain Name System is associated with a network). type Dns struct { Domain string `json:",omitempty"` Search []string `json:",omitempty"` @@ -81,6 +82,7 @@ type HostComputeNetwork struct { Dns Dns `json:",omitempty"` Ipams []Ipam `json:",omitempty"` Flags NetworkFlags `json:",omitempty"` // 0: None + Health Health `json:",omitempty"` SchemaVersion SchemaVersion `json:",omitempty"` } @@ -132,6 +134,12 @@ func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error } // Convert output to HostComputeNetwork var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. 
+ outputNetwork.Type = NAT + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { return nil, err } @@ -196,6 +204,12 @@ func createNetwork(settings string) (*HostComputeNetwork, error) { } // Convert output to HostComputeNetwork var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { return nil, err } @@ -203,7 +217,10 @@ func createNetwork(settings string) (*HostComputeNetwork, error) { } func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { - networkGuid := guid.FromString(networkId) + networkGuid, err := guid.FromString(networkId) + if err != nil { + return nil, errInvalidNetworkID + } // Open Network var ( networkHandle hcnNetwork @@ -237,6 +254,12 @@ func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, erro } // Convert output to HostComputeNetwork var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { return nil, err } @@ -244,7 +267,10 @@ func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, erro } func deleteNetwork(networkId string) error { - networkGuid := guid.FromString(networkId) + networkGuid, err := guid.FromString(networkId) + if err != nil { + return errInvalidNetworkID + } var resultBuffer *uint16 hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { @@ -320,6 +346,24 @@ func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { // Create Network. func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) + for _, ipam := range network.Ipams { + for _, subnet := range ipam.Subnets { + if subnet.IpAddressPrefix != "" { + hasDefault := false + for _, route := range subnet.Routes { + if route.NextHop == "" { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" { + hasDefault = true + } + } + if !hasDefault { + return nil, errors.New("network create error, no default gateway") + } + } + } + } jsonString, err := json.Marshal(network) if err != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go index 6b12d73c6..c2aa599f3 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go @@ -1,6 +1,8 @@ package hcn -import "encoding/json" +import ( + "encoding/json" +) // EndpointPolicyType are the potential Policies that apply to Endpoints. 
type EndpointPolicyType string @@ -14,11 +16,14 @@ const ( OutBoundNAT EndpointPolicyType = "OutBoundNAT" SDNRoute EndpointPolicyType = "SDNRoute" L4Proxy EndpointPolicyType = "L4Proxy" + L4WFPPROXY EndpointPolicyType = "L4WFPPROXY" PortName EndpointPolicyType = "PortName" EncapOverhead EndpointPolicyType = "EncapOverhead" + IOV EndpointPolicyType = "Iov" // Endpoint and Network have InterfaceConstraint and ProviderAddress NetworkProviderAddress EndpointPolicyType = "ProviderAddress" NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" + TierAcl EndpointPolicyType = "TierAcl" ) // EndpointPolicy is a collection of Policy settings for an Endpoint. @@ -40,7 +45,12 @@ const ( InterfaceConstraint NetworkPolicyType = "InterfaceConstraint" ProviderAddress NetworkPolicyType = "ProviderAddress" RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute" + VxlanPort NetworkPolicyType = "VxlanPort" HostRoute NetworkPolicyType = "HostRoute" + SetPolicy NetworkPolicyType = "SetPolicy" + NetworkL4Proxy NetworkPolicyType = "L4Proxy" + LayerConstraint NetworkPolicyType = "LayerConstraint" + NetworkACL NetworkPolicyType = "NetworkACL" ) // NetworkPolicy is a collection of Policy settings for a Network. @@ -64,14 +74,24 @@ type SubnetPolicy struct { Settings json.RawMessage `json:",omitempty"` } +// NatFlags are flags for portmappings. +type NatFlags uint32 + +const ( + NatFlagsNone NatFlags = iota + NatFlagsLocalRoutedVip + NatFlagsIPv6 +) + /// Endpoint Policy objects // PortMappingPolicySetting defines Port Mapping (NAT) type PortMappingPolicySetting struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - VIP string `json:",omitempty"` + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + VIP string `json:",omitempty"` + Flags NatFlags `json:",omitempty"` } // ActionType associated with ACLs. Value is either Allow or Block. @@ -88,6 +108,8 @@ const ( ActionTypeAllow ActionType = "Allow" // Block traffic ActionTypeBlock ActionType = "Block" + // Pass traffic + ActionTypePass ActionType = "Pass" // In is traffic coming to the Endpoint DirectionTypeIn DirectionType = "In" @@ -111,7 +133,7 @@ type AclPolicySetting struct { RemotePorts string `json:",omitempty"` RuleType RuleType `json:",omitempty"` Priority uint16 `json:",omitempty"` -} +} // QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. type QosPolicySetting struct { @@ -120,8 +142,10 @@ type QosPolicySetting struct { // OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. type OutboundNatPolicySetting struct { - VirtualIP string `json:",omitempty"` - Exceptions []string `json:",omitempty"` + VirtualIP string `json:",omitempty"` + Exceptions []string `json:",omitempty"` + Destinations []string `json:",omitempty"` + Flags NatFlags `json:",omitempty"` } // SDNRoutePolicySetting sets SDN Route on an Endpoint. @@ -131,14 +155,43 @@ type SDNRoutePolicySetting struct { NeedEncap bool `json:",omitempty"` } -// L4ProxyPolicySetting sets Layer-4 Proxy on an endpoint. 
-type L4ProxyPolicySetting struct { - IP string `json:",omitempty"` - Port string `json:",omitempty"` - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - ExceptionList []string `json:",omitempty"` - Destination string `json:","` - OutboundNat bool `json:",omitempty"` +// NetworkACLPolicySetting creates ACL rules on a network +type NetworkACLPolicySetting struct { + Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) + Action ActionType `json:","` + Direction DirectionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + RuleType RuleType `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// FiveTuple is nested in L4ProxyPolicySetting for WFP support. +type FiveTuple struct { + Protocols string `json:",omitempty"` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// ProxyExceptions exempts traffic to IpAddresses and Ports +type ProxyExceptions struct { + IpAddressExceptions []string `json:",omitempty"` + PortExceptions []string `json:",omitempty"` +} + +// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint. +type L4WfpProxyPolicySetting struct { + InboundProxyPort string `json:",omitempty"` + OutboundProxyPort string `json:",omitempty"` + FilterTuple FiveTuple `json:",omitempty"` + UserSID string `json:",omitempty"` + InboundExceptions ProxyExceptions `json:",omitempty"` + OutboundExceptions ProxyExceptions `json:",omitempty"` } // PortnameEndpointPolicySetting sets the port name for an endpoint. @@ -151,6 +204,13 @@ type EncapOverheadEndpointPolicySetting struct { Overhead uint16 `json:",omitempty"` } +// IovPolicySetting sets the Iov settings for an endpoint. +type IovPolicySetting struct { + IovOffloadWeight uint32 `json:",omitempty"` + QueuePairsRequested uint32 `json:",omitempty"` + InterruptModeration uint32 `json:",omitempty"` +} + /// Endpoint and Network Policy objects // ProviderAddressEndpointPolicySetting sets the PA for an endpoint. @@ -196,6 +256,10 @@ type AutomaticDNSNetworkPolicySetting struct { Enable bool `json:",omitempty"` } +type LayerConstraintNetworkPolicySetting struct { + LayerId string `json:",omitempty"` +} + /// Subnet Policy objects // VlanPolicySetting isolates a subnet with VLAN tagging. @@ -215,3 +279,66 @@ type RemoteSubnetRoutePolicySetting struct { ProviderAddress string DistributedRouterMacAddress string } + +// SetPolicyTypes associated with SetPolicy. Value is IPSET. 
+type SetPolicyType string + +const ( + SetPolicyTypeIpSet SetPolicyType = "IPSET" + SetPolicyTypeNestedIpSet SetPolicyType = "NESTEDIPSET" +) + +// SetPolicySetting creates IPSets on network +type SetPolicySetting struct { + Id string + Name string + Type SetPolicyType + Values string +} + +// VxlanPortPolicySetting allows configuring the VXLAN TCP port +type VxlanPortPolicySetting struct { + Port uint16 +} + +// ProtocolType associated with L4ProxyPolicy +type ProtocolType uint32 + +const ( + ProtocolTypeUnknown ProtocolType = 0 + ProtocolTypeICMPv4 ProtocolType = 1 + ProtocolTypeIGMP ProtocolType = 2 + ProtocolTypeTCP ProtocolType = 6 + ProtocolTypeUDP ProtocolType = 17 + ProtocolTypeICMPv6 ProtocolType = 58 +) + +//L4ProxyPolicySetting applies proxy policy on network/endpoint +type L4ProxyPolicySetting struct { + IP string `json:",omitempty"` + Port string `json:",omitempty"` + Protocol ProtocolType `json:",omitempty"` + Exceptions []string `json:",omitempty"` + Destination string + OutboundNAT bool `json:",omitempty"` +} + +// TierAclRule represents an ACL within TierAclPolicySetting +type TierAclRule struct { + Id string `json:",omitempty"` + Protocols string `json:",omitempty"` + TierAclRuleAction ActionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// TierAclPolicySetting represents a Tier containing ACLs +type TierAclPolicySetting struct { + Name string `json:","` + Direction DirectionType `json:","` + Order uint16 `json:""` + TierAclRules []TierAclRule `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go new file mode 100644 index 000000000..52e249846 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go @@ -0,0 +1,266 @@ +package hcn + +import ( + "encoding/json" + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// HostComputeRoute represents SDN routes. +type HostComputeRoute struct { + ID string `json:"ID,omitempty"` + HostComputeEndpoints []string `json:",omitempty"` + Setting []SDNRoutePolicySetting `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// ListRoutes makes a call to list all available routes. +func ListRoutes() ([]HostComputeRoute, error) { + hcnQuery := defaultQuery() + routes, err := ListRoutesQuery(hcnQuery) + if err != nil { + return nil, err + } + return routes, nil +} + +// ListRoutesQuery makes a call to query the list of available routes. +func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) { + queryJSON, err := json.Marshal(query) + if err != nil { + return nil, err + } + + routes, err := enumerateRoutes(string(queryJSON)) + if err != nil { + return nil, err + } + return routes, nil +} + +// GetRouteByID returns the route specified by Id. +func GetRouteByID(routeID string) (*HostComputeRoute, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": routeID} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + routes, err := ListRoutesQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(routes) == 0 { + return nil, RouteNotFoundError{RouteId: routeID} + } + return &routes[0], err +} + +// Create Route. 
+func (route *HostComputeRoute) Create() (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID) + + jsonString, err := json.Marshal(route) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString) + route, hcnErr := createRoute(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return route, nil +} + +// Delete Route. +func (route *HostComputeRoute) Delete() error { + logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID) + + existingRoute, _ := GetRouteByID(route.ID) + + if existingRoute != nil { + if err := deleteRoute(route.ID); err != nil { + return err + } + } + + return nil +} + +// AddEndpoint add an endpoint to a route +// Since HCNRoute doesn't implement modify functionality, add operation is essentially delete and add +func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) + + err := route.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) + + return route.Create() +} + +// RemoveEndpoint removes an endpoint from a route +// Since HCNRoute doesn't implement modify functionality, remove operation is essentially delete and add +func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) + + err := route.Delete() + if err != nil { + return nil, err + } + + // Create a list of all the endpoints besides the one being removed + i := 0 + for index, endpointReference := range route.HostComputeEndpoints { + if endpointReference == endpoint.Id { + i = index + break + } + } + + route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...) 
+ return route.Create() +} + +// AddRoute for the specified endpoints and SDN Route setting +func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) + + if len(endpoints) <= 0 { + return nil, errors.New("missing endpoints") + } + + route := &HostComputeRoute{ + SchemaVersion: V2SchemaVersion(), + Setting: []SDNRoutePolicySetting{ + { + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + NeedEncap: needEncapsulation, + }, + }, + } + + for _, endpoint := range endpoints { + route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) + } + + return route.Create() +} + +func enumerateRoutes(query string) ([]HostComputeRoute, error) { + // Enumerate all routes Guids + var ( + resultBuffer *uint16 + routeBuffer *uint16 + ) + hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil { + return nil, err + } + + routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer) + var routeIds []guid.GUID + if err := json.Unmarshal([]byte(routes), &routeIds); err != nil { + return nil, err + } + + var outputRoutes []HostComputeRoute + for _, routeGUID := range routeIds { + route, err := getRoute(routeGUID, query) + if err != nil { + return nil, err + } + outputRoutes = append(outputRoutes, *route) + } + return outputRoutes, nil +} + +func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) { + // Open routes. + var ( + routeHandle hcnRoute + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer) + if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil { + return nil, err + } + // Query routes. + hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close routes. + hr = hcnCloseRoute(routeHandle) + if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeRoute + var outputRoute HostComputeRoute + if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { + return nil, err + } + return &outputRoute, nil +} + +func createRoute(settings string) (*HostComputeRoute, error) { + // Create new route. + var ( + routeHandle hcnRoute + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + routeGUID := guid.GUID{} + hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer) + if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil { + return nil, err + } + // Query route. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close Route. 
+ hr = hcnCloseRoute(routeHandle) + if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeRoute + var outputRoute HostComputeRoute + if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { + return nil, err + } + return &outputRoute, nil +} + +func deleteRoute(routeID string) error { + routeGUID, err := guid.FromString(routeID) + if err != nil { + return errInvalidRouteID + } + var resultBuffer *uint16 + hr := hcnDeleteRoute(&routeGUID, &resultBuffer) + if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go index 9b5df2030..bacb91fed 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go @@ -1,16 +1,39 @@ package hcn import ( + "fmt" + "sync" + + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +var ( + // featuresOnce handles assigning the supported features and printing the supported info to stdout only once to avoid unnecessary work + // multiple times. + featuresOnce sync.Once + featuresErr error + supportedFeatures SupportedFeatures +) + // SupportedFeatures are the features provided by the Service. type SupportedFeatures struct { - Acl AclFeatures `json:"ACL"` - Api ApiSupport `json:"API"` - RemoteSubnet bool `json:"RemoteSubnet"` - HostRoute bool `json:"HostRoute"` - DSR bool `json:"DSR"` + Acl AclFeatures `json:"ACL"` + Api ApiSupport `json:"API"` + RemoteSubnet bool `json:"RemoteSubnet"` + HostRoute bool `json:"HostRoute"` + DSR bool `json:"DSR"` + Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"` + AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"` + SessionAffinity bool `json:"SessionAffinity"` + IPv6DualStack bool `json:"IPv6DualStack"` + SetPolicy bool `json:"SetPolicy"` + VxlanPort bool `json:"VxlanPort"` + L4Proxy bool `json:"L4Proxy"` // network policy that applies VFP rules to all endpoints on the network to redirect traffic + L4WfpProxy bool `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint + TierAcl bool `json:"TierAcl"` + NetworkACL bool `json:"NetworkACL"` + NestedIpSet bool `json:"NestedIpSet"` } // AclFeatures are the supported ACL possibilities. @@ -27,17 +50,41 @@ type ApiSupport struct { V2 bool `json:"V2"` } +// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called +// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the +// various hcn.IsXSupported methods need to be made. +func GetCachedSupportedFeatures() (SupportedFeatures, error) { + // Only query the HCN version and features supported once, instead of everytime this is invoked. The logs are useful to + // debug incidents where there's confusion on if a feature is supported on the host machine. The sync.Once helps to avoid redundant + // spam of these anytime a check needs to be made for if an HCN feature is supported. This is a common occurrence in kube-proxy + // for example. + featuresOnce.Do(func() { + supportedFeatures, featuresErr = getSupportedFeatures() + }) + + return supportedFeatures, featuresErr +} + // GetSupportedFeatures returns the features supported by the Service. +// +// Deprecated: Use GetCachedSupportedFeatures instead. 
func GetSupportedFeatures() SupportedFeatures { - var features SupportedFeatures - - globals, err := GetGlobals() + features, err := GetCachedSupportedFeatures() if err != nil { // Expected on pre-1803 builds, all features will be false/unsupported - logrus.Debugf("Unable to obtain globals: %s", err) + logrus.WithError(err).Errorf("unable to obtain supported features") return features } + return features +} +func getSupportedFeatures() (SupportedFeatures, error) { + var features SupportedFeatures + globals, err := GetGlobals() + if err != nil { + // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. + return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") + } features.Acl = AclFeatures{ AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), @@ -53,18 +100,47 @@ func GetSupportedFeatures() SupportedFeatures { features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) features.DSR = isFeatureSupported(globals.Version, DSRVersion) + features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) + features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) + features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) + features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion) + features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion) + features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion) + features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion) + features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion) + features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion) + features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion) + features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion) - return features + logrus.WithFields(logrus.Fields{ + "version": fmt.Sprintf("%+v", globals.Version), + "supportedFeatures": fmt.Sprintf("%+v", features), + }).Info("HCN feature check") + + return features, nil +} + +func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { + isFeatureSupported := false + + for _, versionRange := range versionsSupported { + isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange) + } + + return isFeatureSupported } -func isFeatureSupported(currentVersion Version, minVersionSupported Version) bool { - if currentVersion.Major < minVersionSupported.Major { +func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool { + if currentVersion.Major < versionRange.MinVersion.Major { return false } - if currentVersion.Major > minVersionSupported.Major { - return true + if currentVersion.Major > versionRange.MaxVersion.Major { + return false + } + if currentVersion.Major == versionRange.MinVersion.Major && currentVersion.Minor < versionRange.MinVersion.Minor { + return false } - if currentVersion.Minor < minVersionSupported.Minor { + if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > versionRange.MaxVersion.Minor { return false } return true diff --git 
a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go index 856b2c140..7ec5b58b6 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go @@ -71,10 +71,13 @@ var ( procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") - procHcnOpenService = modcomputenetwork.NewProc("HcnOpenService") - procHcnRegisterServiceCallback = modcomputenetwork.NewProc("HcnRegisterServiceCallback") - procHcnUnregisterServiceCallback = modcomputenetwork.NewProc("HcnUnregisterServiceCallback") - procHcnCloseService = modcomputenetwork.NewProc("HcnCloseService") + procHcnEnumerateSdnRoutes = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes") + procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") + procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") + procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute") + procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties") + procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute") + procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") ) func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { @@ -657,11 +660,20 @@ func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { return } -func hcnOpenService(service *hcnService, result **uint16) (hr error) { - if hr = procHcnOpenService.Find(); hr != nil { +func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateRoutes(_p0, routes, result) +} + +func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnOpenService.Addr(), 2, uintptr(unsafe.Pointer(service)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -671,11 +683,80 @@ func hcnOpenService(service *hcnService, result **uint16) (hr error) { return } -func hcnRegisterServiceCallback(service hcnService, callback int32, context int32, callbackHandle *hcnCallbackHandle) (hr error) { - if hr = procHcnRegisterServiceCallback.Find(); hr != nil { +func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateRoute(id, _p0, route, result) +} + +func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { + if hr = procHcnCreateSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr 
error) { + if hr = procHcnOpenSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyRoute(route, _p0, result) +} + +func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifySdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryRouteProperties(route, _p0, properties, result) +} + +func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnRegisterServiceCallback.Addr(), 4, uintptr(service), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -685,11 +766,11 @@ func hcnRegisterServiceCallback(service hcnService, callback int32, context int3 return } -func hcnUnregisterServiceCallback(callbackHandle hcnCallbackHandle) (hr error) { - if hr = procHcnUnregisterServiceCallback.Find(); hr != nil { +func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteSdnRoute.Find(); hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnUnregisterServiceCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -699,11 +780,11 @@ func hcnUnregisterServiceCallback(callbackHandle hcnCallbackHandle) (hr error) { return } -func hcnCloseService(service hcnService) (hr error) { - if hr = procHcnCloseService.Find(); hr != nil { +func hcnCloseRoute(route hcnRoute) (hr error) { + if hr = procHcnCloseSdnRoute.Find(); hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnCloseService.Addr(), 1, uintptr(service), 0, 0) + r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index eb013d2c4..9e0059447 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -7,6 +7,9 @@ import ( // HNSEndpoint represents a network endpoint in HNS type HNSEndpoint = hns.HNSEndpoint +// HNSEndpointStats represent the stats for an 
networkendpoint in HNS +type HNSEndpointStats = hns.EndpointStats + // Namespace represents a Compartment. type Namespace = hns.Namespace @@ -39,11 +42,27 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) { // HotAttachEndpoint makes a HCS Call to attach the endpoint to the container func HotAttachEndpoint(containerID string, endpointID string) error { + endpoint, err := GetHNSEndpointByID(endpointID) + if err != nil { + return err + } + isAttached, err := endpoint.IsAttached(containerID) + if isAttached { + return err + } return modifyNetworkEndpoint(containerID, endpointID, Add) } // HotDetachEndpoint makes a HCS Call to detach the endpoint from the container func HotDetachEndpoint(containerID string, endpointID string) error { + endpoint, err := GetHNSEndpointByID(endpointID) + if err != nil { + return err + } + isAttached, err := endpoint.IsAttached(containerID) + if !isAttached { + return err + } return modifyNetworkEndpoint(containerID, endpointID, Remove) } @@ -92,3 +111,8 @@ func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { return hns.GetHNSEndpointByName(endpointName) } + +// GetHNSEndpointStats gets the endpoint stats by ID +func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { + return hns.GetHNSEndpointStats(endpointName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go index a3e03ff8f..00ab26364 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go @@ -21,8 +21,11 @@ const ( OutboundNat = hns.OutboundNat ExternalLoadBalancer = hns.ExternalLoadBalancer Route = hns.Route + Proxy = hns.Proxy ) +type ProxyPolicy = hns.ProxyPolicy + type NatPolicy = hns.NatPolicy type QosPolicy = hns.QosPolicy diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go index 5b91e0cc5..300eb5996 100644 --- a/vendor/github.com/Microsoft/hcsshim/interface.go +++ b/vendor/github.com/Microsoft/hcsshim/interface.go @@ -4,7 +4,7 @@ import ( "io" "time" - "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" ) // ProcessConfig is used as both the input of Container.CreateProcess diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go index 842ee1f7a..4a4fcea84 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go @@ -3,7 +3,7 @@ package cni import ( "errors" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/regstate" ) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go new file mode 100644 index 000000000..27a62a723 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -0,0 +1,91 @@ +package cow + +import ( + "context" + "io" + + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// Process is the interface for an OS process running in a container or utility VM. +type Process interface { + // Close releases resources associated with the process and closes the + // writer and readers returned by Stdio. 
Depending on the implementation, + // this may also terminate the process. + Close() error + // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever + // is appropriate to indicate that no more data is available. + CloseStdin(ctx context.Context) error + // CloseStdout closes the stdout connection to the process. It is used to indicate + // that we are done receiving output on the shim side. + CloseStdout(ctx context.Context) error + // CloseStderr closes the stderr connection to the process. It is used to indicate + // that we are done receiving output on the shim side. + CloseStderr(ctx context.Context) error + // Pid returns the process ID. + Pid() int + // Stdio returns the stdio streams for a process. These may be nil if a stream + // was not requested during CreateProcess. + Stdio() (_ io.Writer, _ io.Reader, _ io.Reader) + // ResizeConsole resizes the virtual terminal associated with the process. + ResizeConsole(ctx context.Context, width, height uint16) error + // Kill sends a SIGKILL or equivalent signal to the process and returns whether + // the signal was delivered. It does not wait for the process to terminate. + Kill(ctx context.Context) (bool, error) + // Signal sends a signal to the process and returns whether the signal was + // delivered. The input is OS specific (either + // guestrequest.SignalProcessOptionsWCOW or + // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process + // to terminate. + Signal(ctx context.Context, options interface{}) (bool, error) + // Wait waits for the process to complete, or for a connection to the process to be + // terminated by some error condition (including calling Close). + Wait() error + // ExitCode returns the exit code of the process. Returns an error if the process is + // not running. + ExitCode() (int, error) +} + +// ProcessHost is the interface for creating processes. +type ProcessHost interface { + // CreateProcess creates a process. The configuration is host specific + // (either hcsschema.ProcessParameters or lcow.ProcessParameters). + CreateProcess(ctx context.Context, config interface{}) (Process, error) + // OS returns the host's operating system, "linux" or "windows". + OS() string + // IsOCI specifies whether this is an OCI-compliant process host. If true, + // then the configuration passed to CreateProcess should have an OCI process + // spec (or nil if this is the initial process in an OCI container). + // Otherwise, it should have the HCS-specific process parameters. + IsOCI() bool +} + +// Container is the interface for container objects, either running on the host or +// in a utility VM. +type Container interface { + ProcessHost + // Close releases the resources associated with the container. Depending on + // the implementation, this may also terminate the container. + Close() error + // ID returns the container ID. + ID() string + // Properties returns the requested container properties targeting a V1 schema container. + Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) + // PropertiesV2 returns the requested container properties targeting a V2 schema container. + PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) + // Start starts a container. + Start(ctx context.Context) error + // Shutdown sends a shutdown request to the container (but does not wait for + // the shutdown to complete). 
+ Shutdown(ctx context.Context) error + // Terminate sends a terminate request to the container (but does not wait + // for the terminate to complete). + Terminate(ctx context.Context) error + // Wait waits for the container to terminate, or for the connection to the + // container to be terminated by some error condition (including calling + // Close). + Wait() error + // Modify sends a request to modify container resources + Modify(ctx context.Context, config interface{}) error +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go deleted file mode 100644 index 5d3d0dfef..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go +++ /dev/null @@ -1,100 +0,0 @@ -package guestrequest - -import ( - "github.com/Microsoft/hcsshim/internal/schema2" -) - -// Arguably, many of these (at least CombinedLayers) should have been generated -// by swagger. -// -// This will also change package name due to an inbound breaking change. - -// This class is used by a modify request to add or remove a combined layers -// structure in the guest. For windows, the GCS applies a filter in ContainerRootPath -// using the specified layers as the parent content. Ignores property ScratchPath -// since the container path is already the scratch path. For linux, the GCS unions -// the specified layers and ScratchPath together, placing the resulting union -// filesystem at ContainerRootPath. -type CombinedLayers struct { - ContainerRootPath string `json:"ContainerRootPath,omitempty"` - Layers []hcsschema.Layer `json:"Layers,omitempty"` - ScratchPath string `json:"ScratchPath,omitempty"` -} - -// Defines the schema for hosted settings passed to GCS and/or OpenGCS - -// SCSI. Scratch space for remote file-system commands, or R/W layer for containers -type LCOWMappedVirtualDisk struct { - MountPath string `json:"MountPath,omitempty"` // /tmp/scratch for an LCOW utility VM being used as a service VM - Lun uint8 `json:"Lun,omitempty"` - Controller uint8 `json:"Controller,omitempty"` - ReadOnly bool `json:"ReadOnly,omitempty"` -} - -type WCOWMappedVirtualDisk struct { - ContainerPath string `json:"ContainerPath,omitempty"` - Lun int32 `json:"Lun,omitempty"` -} - -type LCOWMappedDirectory struct { - MountPath string `json:"MountPath,omitempty"` - Port int32 `json:"Port,omitempty"` - ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported) - ReadOnly bool `json:"ReadOnly,omitempty"` -} - -// Read-only layers over VPMem -type LCOWMappedVPMemDevice struct { - DeviceNumber uint32 `json:"DeviceNumber,omitempty"` - MountPath string `json:"MountPath,omitempty"` // /tmp/pN -} - -type LCOWNetworkAdapter struct { - NamespaceID string `json:",omitempty"` - ID string `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - EnableLowMetric bool `json:",omitempty"` - EncapOverhead uint16 `json:",omitempty"` -} - -type ResourceType string - -const ( - // These are constants for v2 schema modify guest requests. 
- ResourceTypeMappedDirectory ResourceType = "MappedDirectory" - ResourceTypeMappedVirtualDisk ResourceType = "MappedVirtualDisk" - ResourceTypeNetwork ResourceType = "Network" - ResourceTypeNetworkNamespace ResourceType = "NetworkNamespace" - ResourceTypeCombinedLayers ResourceType = "CombinedLayers" - ResourceTypeVPMemDevice ResourceType = "VPMemDevice" -) - -// GuestRequest is for modify commands passed to the guest. -type GuestRequest struct { - RequestType string `json:"RequestType,omitempty"` - ResourceType ResourceType `json:"ResourceType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -type NetworkModifyRequest struct { - AdapterId string `json:"AdapterId,omitempty"` - RequestType string `json:"RequestType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -type RS4NetworkModifyRequest struct { - AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` - RequestType string `json:"RequestType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} - -// SignalProcessOptions is the options passed to either WCOW or LCOW -// to signal a given process. -type SignalProcessOptions struct { - Signal int `json:,omitempty` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go b/vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go deleted file mode 100644 index e9e45c030..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go +++ /dev/null @@ -1,69 +0,0 @@ -package guid - -import ( - "crypto/rand" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -var _ = (json.Marshaler)(&GUID{}) -var _ = (json.Unmarshaler)(&GUID{}) - -type GUID [16]byte - -func New() GUID { - g := GUID{} - _, err := io.ReadFull(rand.Reader, g[:]) - if err != nil { - panic(err) - } - return g -} - -func (g GUID) String() string { - return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:]) -} - -func FromString(s string) GUID { - if len(s) != 36 { - panic(fmt.Sprintf("invalid GUID length: %d", len(s))) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - panic("invalid GUID format") - } - indexOrder := [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } - byteOrder := [16]int{ - 3, 2, 1, 0, - 5, 4, - 7, 6, - 8, 9, - 10, 11, 12, 13, 14, 15, - } - var g GUID - for i, x := range indexOrder { - b, err := strconv.ParseInt(s[x:x+2], 16, 16) - if err != nil { - panic(err) - } - g[byteOrder[i]] = byte(b) - } - return g -} - -func (g GUID) MarshalJSON() ([]byte, error) { - return json.Marshal(g.String()) -} - -func (g *GUID) UnmarshalJSON(data []byte) error { - *g = FromString(strings.Trim(string(data), "\"")) - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go index f9a922a4b..d13772b03 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -1,16 +1,19 @@ package hcs import ( + "fmt" "sync" "syscall" "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/vmcompute" "github.com/sirupsen/logrus" ) var ( nextCallback uintptr - callbackMap = map[uintptr]*notifcationWatcherContext{} + callbackMap = map[uintptr]*notificationWatcherContext{} callbackMapLock = sync.RWMutex{} notificationWatcherCallback = 
syscall.NewCallback(notificationWatcher) @@ -40,35 +43,84 @@ var ( ) type hcsNotification uint32 + +func (hn hcsNotification) String() string { + switch hn { + case hcsNotificationSystemExited: + return "SystemExited" + case hcsNotificationSystemCreateCompleted: + return "SystemCreateCompleted" + case hcsNotificationSystemStartCompleted: + return "SystemStartCompleted" + case hcsNotificationSystemPauseCompleted: + return "SystemPauseCompleted" + case hcsNotificationSystemResumeCompleted: + return "SystemResumeCompleted" + case hcsNotificationSystemCrashReport: + return "SystemCrashReport" + case hcsNotificationSystemSiloJobCreated: + return "SystemSiloJobCreated" + case hcsNotificationSystemSaveCompleted: + return "SystemSaveCompleted" + case hcsNotificationSystemRdpEnhancedModeStateChanged: + return "SystemRdpEnhancedModeStateChanged" + case hcsNotificationSystemShutdownFailed: + return "SystemShutdownFailed" + case hcsNotificationSystemGetPropertiesCompleted: + return "SystemGetPropertiesCompleted" + case hcsNotificationSystemModifyCompleted: + return "SystemModifyCompleted" + case hcsNotificationSystemCrashInitiated: + return "SystemCrashInitiated" + case hcsNotificationSystemGuestConnectionClosed: + return "SystemGuestConnectionClosed" + case hcsNotificationProcessExited: + return "ProcessExited" + case hcsNotificationInvalid: + return "Invalid" + case hcsNotificationServiceDisconnect: + return "ServiceDisconnect" + default: + return fmt.Sprintf("Unknown: %d", hn) + } +} + type notificationChannel chan error -type notifcationWatcherContext struct { +type notificationWatcherContext struct { channels notificationChannels - handle hcsCallback + handle vmcompute.HcsCallback + + systemID string + processID int } type notificationChannels map[hcsNotification]notificationChannel -func newChannels() notificationChannels { +func newSystemChannels() notificationChannels { channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationSystemExited, + hcsNotificationSystemCreateCompleted, + hcsNotificationSystemStartCompleted, + hcsNotificationSystemPauseCompleted, + hcsNotificationSystemResumeCompleted, + hcsNotificationSystemSaveCompleted, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} - channels[hcsNotificationSystemExited] = make(notificationChannel, 1) - channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1) - channels[hcsNotificationProcessExited] = make(notificationChannel, 1) - channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1) - channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1) - channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1) - channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1) - channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1) - channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1) - 
channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1) - +func newProcessChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationProcessExited, + } { + channels[notif] = make(notificationChannel, 1) + } return channels } @@ -92,12 +144,17 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt return 0 } + log := logrus.WithFields(logrus.Fields{ + "notification-type": notificationType.String(), + "system-id": context.systemID, + }) + if context.processID != 0 { + log.Data[logfields.ProcessID] = context.processID + } + log.Debug("HCS notification") + if channel, ok := context.channels[notificationType]; ok { channel <- result - } else { - logrus.WithFields(logrus.Fields{ - "notification-type": notificationType, - }).Warn("Received a callback of an unsupported type") } return 0 diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go deleted file mode 100644 index 3669c34aa..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -package hcs - -import "C" - -// This import is needed to make the library compile as CGO because HCSSHIM -// only works with CGO due to callbacks from HCS comming back from a C thread -// which is not supported without CGO. See https://github.com/golang/go/issues/10973 diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 079b56535..e21354ffd 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -1,14 +1,14 @@ package hcs import ( + "context" "encoding/json" "errors" "fmt" + "net" "syscall" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/log" ) var ( @@ -60,7 +60,7 @@ var ( // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) - // ErrProcNotFound is an error encountered when the the process cannot be found + // ErrProcNotFound is an error encountered when a procedure look up fails. ErrProcNotFound = syscall.Errno(0x7f) // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 @@ -78,6 +78,13 @@ var ( // ErrNotSupported is an error encountered when hcs doesn't support the request ErrPlatformNotSupported = errors.New("unsupported platform request") + + // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. + ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) + + // ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that + // compute system has already been closed. + ErrInvalidHandle = syscall.Errno(0x6) ) type ErrorEvent struct { @@ -117,17 +124,11 @@ func (ev *ErrorEvent) String() string { return evs } -func processHcsResult(resultp *uint16) []ErrorEvent { - if resultp != nil { - resultj := interop.ConvertAndFreeCoTaskMemString(resultp) - logrus.WithField(logfields.JSON, resultj). 
- Debug("HCS Result") +func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent { + if resultJSON != "" { result := &hcsResult{} - if err := json.Unmarshal([]byte(resultj), result); err != nil { - logrus.WithFields(logrus.Fields{ - logfields.JSON: resultj, - logrus.ErrorKey: err, - }).Warning("Could not unmarshal HCS result") + if err := json.Unmarshal([]byte(resultJSON), result); err != nil { + log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result") return nil } return result.ErrorEvents @@ -141,6 +142,8 @@ type HcsError struct { Events []ErrorEvent } +var _ net.Error = &HcsError{} + func (e *HcsError) Error() string { s := e.Op + ": " + e.Err.Error() for _, ev := range e.Events { @@ -149,6 +152,16 @@ func (e *HcsError) Error() string { return s } +func (e *HcsError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() +} + +func (e *HcsError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + // ProcessError is an error encountered in HCS during an operation on a Process object type ProcessError struct { SystemID string @@ -158,27 +171,37 @@ type ProcessError struct { Events []ErrorEvent } +var _ net.Error = &ProcessError{} + // SystemError is an error encountered in HCS during an operation on a Container object type SystemError struct { ID string Op string Err error - Extra string Events []ErrorEvent } +var _ net.Error = &SystemError{} + func (e *SystemError) Error() string { s := e.Op + " " + e.ID + ": " + e.Err.Error() for _, ev := range e.Events { s += "\n" + ev.String() } - if e.Extra != "" { - s += "\n(extra info: " + e.Extra + ")" - } return s } -func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error { +func (e *SystemError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() +} + +func (e *SystemError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { // Don't double wrap errors if _, ok := err.(*SystemError); ok { return err @@ -186,7 +209,6 @@ func makeSystemError(system *System, op string, extra string, err error, events return &SystemError{ ID: system.ID(), Op: op, - Extra: extra, Err: err, Events: events, } @@ -200,6 +222,16 @@ func (e *ProcessError) Error() string { return s } +func (e *ProcessError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() +} + +func (e *ProcessError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { // Don't double wrap errors if _, ok := err.(*ProcessError); ok { @@ -217,12 +249,19 @@ func makeProcessError(process *Process, op string, err error, events []ErrorEven // IsNotExist checks if an error is caused by the Container or Process not existing. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. 
func IsNotExist(err error) bool { err = getInnerError(err) return err == ErrComputeSystemDoesNotExist || - err == ErrElementNotFound || - err == ErrProcNotFound + err == ErrElementNotFound +} + +// IsErrorInvalidHandle checks whether the error is the result of an operation carried +// out on a handle that is invalid/closed. This error popped up while trying to query +// stats on a container in the process of being stopped. +func IsErrorInvalidHandle(err error) bool { + err = getInnerError(err) + return err == ErrInvalidHandle } // IsAlreadyClosed checks if an error is caused by the Container or Process having been @@ -242,6 +281,9 @@ func IsPending(err error) bool { // IsTimeout returns a boolean indicating whether the error is caused by // a timeout waiting for the operation to complete. func IsTimeout(err error) bool { + if err, ok := err.(net.Error); ok && err.Timeout() { + return true + } err = getInnerError(err) return err == ErrTimeout } @@ -250,12 +292,12 @@ func IsTimeout(err error) bool { // a Container or Process being already stopped. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. func IsAlreadyStopped(err error) bool { err = getInnerError(err) return err == ErrVmcomputeAlreadyStopped || - err == ErrElementNotFound || - err == ErrProcNotFound + err == ErrProcessAlreadyStopped || + err == ErrElementNotFound } // IsNotSupported returns a boolean indicating whether the error is caused by @@ -272,6 +314,20 @@ func IsNotSupported(err error) bool { err == ErrVmcomputeUnknownMessage } +// IsOperationInvalidState returns true when err is caused by +// `ErrVmcomputeOperationInvalidState`. +func IsOperationInvalidState(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationInvalidState +} + +// IsAccessIsDenied returns true when err is caused by +// `ErrVmcomputeOperationAccessIsDenied`. +func IsAccessIsDenied(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationAccessIsDenied +} + func getInnerError(err error) error { switch pe := err.(type) { case nil: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go deleted file mode 100644 index b0d49cbcf..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go +++ /dev/null @@ -1,48 +0,0 @@ -// Shim for the Host Compute Service (HCS) to manage Windows Server -// containers and Hyper-V containers. - -package hcs - -import ( - "syscall" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hcs.go - -//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? -//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? -//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? -//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? -//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? 
-//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? -//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? -//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? -//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? -//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? -//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? -//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? -//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? - -//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? -//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? -//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess? -//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? -//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? -//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? -//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? -//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? -//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? - -type hcsSystem syscall.Handle -type hcsProcess syscall.Handle -type hcsCallback syscall.Handle - -type hcsProcessInformation struct { - ProcessId uint32 - Reserved uint32 - StdInput syscall.Handle - StdOutput syscall.Handle - StdError syscall.Handle -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go deleted file mode 100644 index 6d03b17a2..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go +++ /dev/null @@ -1,20 +0,0 @@ -package hcs - -import "github.com/sirupsen/logrus" - -func logOperationBegin(ctx logrus.Fields, msg string) { - logrus.WithFields(ctx).Debug(msg) -} - -func logOperationEnd(ctx logrus.Fields, msg string, err error) { - // Copy the log and fields first. 
- log := logrus.WithFields(ctx) - if err == nil { - log.Debug(msg) - } else { - // Edit only the copied field data to avoid race conditions on the - // write. - log.Data[logrus.ErrorKey] = err - log.Error(msg) - } -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 41e20bbf9..f4605922a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -1,48 +1,50 @@ package hcs import ( + "context" "encoding/json" + "errors" "io" + "os" "sync" "syscall" "time" - "github.com/Microsoft/hcsshim/internal/guestrequest" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "go.opencensus.io/trace" ) // ContainerError is an error encountered in HCS type Process struct { - handleLock sync.RWMutex - handle hcsProcess - processID int - system *System - cachedPipes *cachedPipes - callbackNumber uintptr - - logctx logrus.Fields + handleLock sync.RWMutex + handle vmcompute.HcsProcess + processID int + system *System + hasCachedStdio bool + stdioLock sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + callbackNumber uintptr + killSignalDelivered bool + + closedWaitOnce sync.Once + waitBlock chan struct{} + exitCode int + waitError error } -func newProcess(process hcsProcess, processID int, computeSystem *System) *Process { +func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { return &Process{ handle: process, processID: processID, system: computeSystem, - logctx: logrus.Fields{ - logfields.ContainerID: computeSystem.ID(), - logfields.ProcessID: processID, - }, + waitBlock: make(chan struct{}), } } -type cachedPipes struct { - stdIn syscall.Handle - stdOut syscall.Handle - stdErr syscall.Handle -} - type processModifyRequest struct { Operation string ConsoleSize *consoleSize `json:",omitempty"` @@ -58,18 +60,14 @@ type closeHandle struct { Handle string } -type ProcessStatus struct { +type processStatus struct { ProcessID uint32 Exited bool ExitCode uint32 LastWaitResult int32 } -const ( - stdIn string = "StdIn" - stdOut string = "StdOut" - stdErr string = "StdErr" -) +const stdIn string = "StdIn" const ( modifyConsoleSize string = "ConsoleSize" @@ -86,120 +84,188 @@ func (process *Process) SystemID() string { return process.system.ID() } -func (process *Process) logOperationBegin(operation string) { - logOperationBegin( - process.logctx, - operation+" - Begin Operation") -} - -func (process *Process) logOperationEnd(operation string, err error) { - var result string - if err == nil { - result = "Success" - } else { - result = "Error" +func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { + switch err { + case nil: + return true, nil + case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: + select { + case <-process.waitBlock: + // The process exit notification has already arrived. + default: + // The process should be gone, but we have not received the notification. + // After a second, force unblock the process wait to work around a possible + // deadlock in the HCS. 
+ go func() { + time.Sleep(time.Second) + process.closedWaitOnce.Do(func() { + log.G(ctx).WithError(err).Warn("force unblocking process waits") + process.exitCode = -1 + process.waitError = err + close(process.waitBlock) + }) + }() + } + return false, nil + default: + return false, err } - - logOperationEnd( - process.logctx, - operation+" - End Operation - "+result, - err) } // Signal signals the process with `options`. -func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) { +// +// For LCOW `guestrequest.SignalProcessOptionsLCOW`. +// +// For WCOW `guestrequest.SignalProcessOptionsWCOW`. +func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "hcsshim::Process::Signal" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() + operation := "hcs::Process::Signal" if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) + return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) } optionsb, err := json.Marshal(options) if err != nil { - return err + return false, err } - optionsStr := string(optionsb) - - var resultp *uint16 - syscallWatcher(process.logctx, func() { - err = hcsSignalProcess(process.handle, optionsStr, &resultp) - }) - events := processHcsResult(resultp) + resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb)) + events := processHcsResult(ctx, resultJSON) + delivered, err := process.processSignalResult(ctx, err) if err != nil { - return makeProcessError(process, operation, err, events) + err = makeProcessError(process, operation, err, events) } - - return nil + return delivered, err } // Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *Process) Kill() (err error) { +func (process *Process) Kill(ctx context.Context) (bool, error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "hcsshim::Process::Kill" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() + operation := "hcs::Process::Kill" if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) + return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) } - var resultp *uint16 - syscallWatcher(process.logctx, func() { - err = hcsTerminateProcess(process.handle, &resultp) - }) - events := processHcsResult(resultp) - if err != nil { - return makeProcessError(process, operation, err, events) + if process.killSignalDelivered { + // A kill signal has already been sent to this process. Sending a second + // one offers no real benefit, as processes cannot stop themselves from + // being terminated, once a TerminateProcess has been issued. Sending a + // second kill may result in a number of errors (two of which detailed bellow) + // and which we can avoid handling. + return true, nil } - return nil -} - -// Wait waits for the process to exit. 
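The rewritten Kill above is deliberately tolerant: a second kill is skipped once killSignalDelivered is set, and ERROR_ACCESS_DENIED or an already-stopped result from HcsTerminateProcess still counts as a delivered signal. A small sketch of that classification under the same assumptions (errAlreadyStopped is a placeholder for the real HCS error values):

package main

import (
	"errors"
	"fmt"
	"os"
)

// errAlreadyStopped stands in for HCS_E_PROCESS_ALREADY_STOPPED /
// ErrVmcomputeAlreadyStopped in the code above.
var errAlreadyStopped = errors.New("the virtual machine or container has already stopped")

// signalDelivered reproduces the tolerant decision used by Kill above:
// "access denied" after the target exited and "already stopped" both count
// as a delivered kill; anything else is a real failure.
func signalDelivered(err error) (bool, error) {
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, os.ErrPermission):
		// TerminateProcess on a process that has already exited reports
		// ERROR_ACCESS_DENIED, which is benign here.
		return true, nil
	case errors.Is(err, errAlreadyStopped):
		return true, nil
	default:
		return false, err
	}
}

func main() {
	for _, err := range []error{nil, os.ErrPermission, errAlreadyStopped, errors.New("unreachable host")} {
		delivered, failure := signalDelivered(err)
		fmt.Printf("delivered=%-5v failure=%v\n", delivered, failure)
	}
}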
-func (process *Process) Wait() (err error) { - operation := "hcsshim::Process::Wait" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() - - err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) + resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) if err != nil { - return makeProcessError(process, operation, err, nil) + // We still need to check these two cases, as processes may still be killed by an + // external actor (human operator, OOM, random script etc). + if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { + // There are two cases where it should be safe to ignore an error returned + // by HcsTerminateProcess. The first one is cause by the fact that + // HcsTerminateProcess ends up calling TerminateProcess in the context + // of a container. According to the TerminateProcess documentation: + // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks + // After a process has terminated, call to TerminateProcess with open + // handles to the process fails with ERROR_ACCESS_DENIED (5) error code. + // It's safe to ignore this error here. HCS should always have permissions + // to kill processes inside any container. So an ERROR_ACCESS_DENIED + // is unlikely to be anything else than what the ending remarks in the + // documentation states. + // + // The second case is generated by hcs itself, if for any reason HcsTerminateProcess + // is called twice in a very short amount of time. In such cases, hcs may return + // HCS_E_PROCESS_ALREADY_STOPPED. + return true, nil + } + } + events := processHcsResult(ctx, resultJSON) + delivered, err := process.processSignalResult(ctx, err) + if err != nil { + err = makeProcessError(process, operation, err, events) } - return nil + process.killSignalDelivered = delivered + return delivered, err } -// WaitTimeout waits for the process to exit or the duration to elapse. It returns -// false if timeout occurs. -func (process *Process) WaitTimeout(timeout time.Duration) (err error) { - operation := "hcssshim::Process::WaitTimeout" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() +// waitBackground waits for the process exit notification. Once received sets +// `process.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `process.handle` but `Wait` is safe to +// call multiple times. 
+func (process *Process) waitBackground() { + operation := "hcs::Process::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + var ( + err error + exitCode = -1 + propertiesJSON string + resultJSON string + ) - err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout) + err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil) if err != nil { - return makeProcessError(process, operation, err, nil) + err = makeProcessError(process, operation, err, nil) + log.G(ctx).WithError(err).Error("failed wait") + } else { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + // Make sure we didnt race with Close() here + if process.handle != 0 { + propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) + events := processHcsResult(ctx, resultJSON) + if err != nil { + err = makeProcessError(process, operation, err, events) //nolint:ineffassign + } else { + properties := &processStatus{} + err = json.Unmarshal([]byte(propertiesJSON), properties) + if err != nil { + err = makeProcessError(process, operation, err, nil) //nolint:ineffassign + } else { + if properties.LastWaitResult != 0 { + log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") + } else { + exitCode = int(properties.ExitCode) + } + } + } + } } + log.G(ctx).WithField("exitCode", exitCode).Debug("process exited") - return nil + process.closedWaitOnce.Do(func() { + process.exitCode = exitCode + process.waitError = err + close(process.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +// Wait waits for the process to exit. If the process has already exited returns +// the pervious error (if any). +func (process *Process) Wait() error { + <-process.waitBlock + return process.waitError } // ResizeConsole resizes the console of the process. 
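waitBackground and the new Wait above replace the old blocking wait: a single background goroutine consumes the process-exited notification, records the exit code through closedWaitOnce, and closes waitBlock so every Wait and ExitCode call observes the same result, even after Close. A self-contained sketch of that pattern (proc and the sleep are stand-ins for the real notification plumbing):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// proc sketches the wait machinery introduced above: exactly one background
// goroutine consumes the exit notification, records the result through a
// sync.Once, and closes waitBlock so all callers see the same outcome.
type proc struct {
	closedWaitOnce sync.Once
	waitBlock      chan struct{}
	exitCode       int
	waitError      error
}

func startProc() *proc {
	p := &proc{waitBlock: make(chan struct{}), exitCode: -1}
	go p.waitBackground()
	return p
}

func (p *proc) waitBackground() {
	// Stand-in for waitForNotification(hcsNotificationProcessExited) plus the
	// follow-up HcsGetProcessProperties call in the real code.
	time.Sleep(10 * time.Millisecond)
	p.closedWaitOnce.Do(func() {
		p.exitCode = 0
		close(p.waitBlock)
	})
}

// Wait blocks until the background waiter has run; it is safe to call any
// number of times from any goroutine.
func (p *proc) Wait() error {
	<-p.waitBlock
	return p.waitError
}

// ExitCode only succeeds once the process has exited, matching the rewritten
// ExitCode above.
func (p *proc) ExitCode() (int, error) {
	select {
	case <-p.waitBlock:
		return p.exitCode, p.waitError
	default:
		return -1, errors.New("process has not exited yet")
	}
}

func main() {
	p := startProc()
	fmt.Println("wait:", p.Wait())
	fmt.Println(p.ExitCode())
}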
-func (process *Process) ResizeConsole(width, height uint16) (err error) { +func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "hcsshim::Process::ResizeConsole" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() + operation := "hcs::Process::ResizeConsole" if process.handle == 0 { return makeProcessError(process, operation, ErrAlreadyClosed, nil) @@ -218,11 +284,8 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) { return err } - modifyRequestStr := string(modifyRequestb) - - var resultp *uint16 - err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) - events := processHcsResult(resultp) + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) if err != nil { return makeProcessError(process, operation, err, events) } @@ -230,104 +293,55 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) { return nil } -func (process *Process) Properties() (_ *ProcessStatus, err error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcsshim::Process::Properties" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() - - if process.handle == 0 { - return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - var ( - resultp *uint16 - propertiesp *uint16 - ) - syscallWatcher(process.logctx, func() { - err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp) - }) - events := processHcsResult(resultp) - if err != nil { - return nil, makeProcessError(process, operation, err, events) - } - - if propertiesp == nil { - return nil, ErrUnexpectedValue - } - propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp) - - properties := &ProcessStatus{} - if err := json.Unmarshal(propertiesRaw, properties); err != nil { - return nil, makeProcessError(process, operation, err, nil) - } - - return properties, nil -} - // ExitCode returns the exit code of the process. The process must have // already terminated. -func (process *Process) ExitCode() (_ int, err error) { - operation := "hcsshim::Process::ExitCode" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() - - properties, err := process.Properties() - if err != nil { - return 0, makeProcessError(process, operation, err, nil) - } - - if properties.Exited == false { - return 0, makeProcessError(process, operation, ErrInvalidProcessState, nil) - } - - if properties.LastWaitResult != 0 { - return 0, makeProcessError(process, operation, syscall.Errno(properties.LastWaitResult), nil) +func (process *Process) ExitCode() (int, error) { + select { + case <-process.waitBlock: + if process.waitError != nil { + return -1, process.waitError + } + return process.exitCode, nil + default: + return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) } - - return int(properties.ExitCode), nil } -// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes; it should be possible to -// call this multiple times to get multiple interfaces. -func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { +// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. 
Closing +// these pipes does not close the underlying pipes. Once returned, these pipes +// are the responsibility of the caller to close. +func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { + operation := "hcs::Process::StdioLegacy" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "hcsshim::Process::Stdio" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() - if process.handle == 0 { return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) } - var stdIn, stdOut, stdErr syscall.Handle - - if process.cachedPipes == nil { - var ( - processInfo hcsProcessInformation - resultp *uint16 - ) - err = hcsGetProcessInfo(process.handle, &processInfo, &resultp) - events := processHcsResult(resultp) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, events) - } - - stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError - } else { - // Use cached pipes - stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.hasCachedStdio { + stdin, stdout, stderr := process.stdin, process.stdout, process.stderr + process.stdin, process.stdout, process.stderr = nil, nil, nil + process.hasCachedStdio = false + return stdin, stdout, stderr, nil + } - // Invalidate the cache - process.cachedPipes = nil + processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, events) } - pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr}) + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) if err != nil { return nil, nil, nil, makeProcessError(process, operation, err, nil) } @@ -335,15 +349,21 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo return pipes[0], pipes[1], pipes[2], nil } +// Stdio returns the stdin, stdout, and stderr pipes, respectively. +// To close them, close the process handle. +func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + return process.stdin, process.stdout, process.stderr +} + // CloseStdin closes the write side of the stdin pipe so that the process is // notified on the read side that there is no more data in stdin. 
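StdioLegacy above hands the cached stdio pipes to the caller exactly once, after which the caller owns and closes them, while the new Stdio accessor only exposes them for the lifetime of the process handle. A stand-alone sketch of the hand-off-once idea (stdioCache is illustrative, not the hcsshim type):

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// stdioCache hands its streams out at most once, mirroring StdioLegacy above:
// after the first successful take the caller owns them and must close them.
type stdioCache struct {
	mu        sync.Mutex
	hasCached bool
	stdout    io.ReadCloser
}

func (c *stdioCache) take() (io.ReadCloser, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.hasCached {
		return nil, false
	}
	out := c.stdout
	c.stdout, c.hasCached = nil, false
	return out, true
}

func main() {
	c := &stdioCache{
		hasCached: true,
		stdout:    io.NopCloser(bytes.NewBufferString("container output\n")),
	}
	if out, ok := c.take(); ok {
		data, _ := io.ReadAll(out)
		out.Close() // ownership transferred: the caller closes
		fmt.Printf("first take: %q\n", data)
	}
	_, ok := c.take()
	fmt.Println("second take succeeded:", ok) // false
}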
-func (process *Process) CloseStdin() (err error) { +func (process *Process) CloseStdin(ctx context.Context) error { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "hcsshim::Process::CloseStdin" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() + operation := "hcs::Process::CloseStdin" if process.handle == 0 { return makeProcessError(process, operation, ErrAlreadyClosed, nil) @@ -361,99 +381,177 @@ func (process *Process) CloseStdin() (err error) { return err } - modifyRequestStr := string(modifyRequestb) - - var resultp *uint16 - err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) - events := processHcsResult(resultp) + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) if err != nil { return makeProcessError(process, operation, err, events) } + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + process.stdioLock.Unlock() + + return nil +} + +func (process *Process) CloseStdout(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + return nil +} + +func (process *Process) CloseStderr(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + + } return nil } // Close cleans up any state associated with the process but does not kill // or wait on it. 
func (process *Process) Close() (err error) { + operation := "hcs::Process::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + process.handleLock.Lock() defer process.handleLock.Unlock() - operation := "hcsshim::Process::Close" - process.logOperationBegin(operation) - defer func() { process.logOperationEnd(operation, err) }() - // Don't double free this if process.handle == 0 { return nil } - if err = process.unregisterCallback(); err != nil { + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + } + process.stdioLock.Unlock() + + if err = process.unregisterCallback(ctx); err != nil { return makeProcessError(process, operation, err, nil) } - if err = hcsCloseProcess(process.handle); err != nil { + if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { return makeProcessError(process, operation, err, nil) } process.handle = 0 + process.closedWaitOnce.Do(func() { + process.exitCode = -1 + process.waitError = ErrAlreadyClosed + close(process.waitBlock) + }) return nil } -func (process *Process) registerCallback() error { - context := ¬ifcationWatcherContext{ - channels: newChannels(), +func (process *Process) registerCallback(ctx context.Context) error { + callbackContext := ¬ificationWatcherContext{ + channels: newProcessChannels(), + systemID: process.SystemID(), + processID: process.processID, } callbackMapLock.Lock() callbackNumber := nextCallback nextCallback++ - callbackMap[callbackNumber] = context + callbackMap[callbackNumber] = callbackContext callbackMapLock.Unlock() - var callbackHandle hcsCallback - err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle) + callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) if err != nil { return err } - context.handle = callbackHandle + callbackContext.handle = callbackHandle process.callbackNumber = callbackNumber return nil } -func (process *Process) unregisterCallback() error { +func (process *Process) unregisterCallback(ctx context.Context) error { callbackNumber := process.callbackNumber callbackMapLock.RLock() - context := callbackMap[callbackNumber] + callbackContext := callbackMap[callbackNumber] callbackMapLock.RUnlock() - if context == nil { + if callbackContext == nil { return nil } - handle := context.handle + handle := callbackContext.handle if handle == 0 { return nil } - // hcsUnregisterProcessCallback has its own syncronization - // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. - err := hcsUnregisterProcessCallback(handle) + // vmcompute.HcsUnregisterProcessCallback has its own synchronization to + // wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
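registerCallback and unregisterCallback above key each HCS notification callback by an incrementing number in a shared map, and unregistration now deletes the entry rather than storing nil. A self-contained sketch of that bookkeeping, with the HCS callback handle faked by an int:

package main

import (
	"fmt"
	"sync"
)

// callbackContext is a stand-in for notificationWatcherContext above.
type callbackContext struct {
	handle   int // fake callback handle; the real code stores an HCS handle
	systemID string
}

var (
	callbackMapLock sync.RWMutex
	callbackMap     = map[uintptr]*callbackContext{}
	nextCallback    uintptr
)

// register allocates the next callback number and publishes the context,
// mirroring registerCallback above.
func register(systemID string) uintptr {
	callbackMapLock.Lock()
	defer callbackMapLock.Unlock()
	n := nextCallback
	nextCallback++
	callbackMap[n] = &callbackContext{handle: int(n) + 100, systemID: systemID}
	return n
}

// unregister looks the context up under a read lock, then deletes the entry,
// as unregisterCallback above now does (delete rather than storing nil).
func unregister(n uintptr) {
	callbackMapLock.RLock()
	ctx := callbackMap[n]
	callbackMapLock.RUnlock()
	if ctx == nil {
		return
	}
	// The real code unregisters the HCS callback here, outside the map lock.
	callbackMapLock.Lock()
	delete(callbackMap, n)
	callbackMapLock.Unlock()
}

func main() {
	n := register("container-1")
	fmt.Println("registered callback", n)
	unregister(n)
	fmt.Println("entries left:", len(callbackMap))
}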
+ err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) if err != nil { return err } - closeChannels(context.channels) + closeChannels(callbackContext.channels) callbackMapLock.Lock() - callbackMap[callbackNumber] = nil + delete(callbackMap, callbackNumber) callbackMapLock.Unlock() - handle = 0 + handle = 0 //nolint:ineffassign return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go similarity index 94% rename from vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go index 995433ace..b621c5593 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go @@ -4,7 +4,8 @@ import ( "encoding/json" "time" - "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/go-winio/pkg/guid" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" ) // ProcessConfig is used as both the input of Container.CreateProcess @@ -62,7 +63,7 @@ type MappedVirtualDisk struct { CreateInUtilityVM bool `json:",omitempty"` ReadOnly bool `json:",omitempty"` Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" - AttachOnly bool `json:",omitempty:` + AttachOnly bool `json:",omitempty"` } // AssignedDevice represents a device that has been directly assigned to a container @@ -118,9 +119,9 @@ type PropertyType string const ( PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 - PropertyTypeProcessList = "ProcessList" // V1 and V2 - PropertyTypeMappedVirtualDisk = "MappedVirtualDisk" // Not supported in V2 schema call - PropertyTypeGuestConnection = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 + PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. 
Nil return from HCS before RS5 ) type PropertyQuery struct { @@ -133,9 +134,10 @@ type ContainerProperties struct { State string Name string SystemType string + RuntimeOSType string `json:"RuntimeOsType,omitempty"` Owner string SiloGUID string `json:"SiloGuid,omitempty"` - RuntimeID string `json:"RuntimeId,omitempty"` + RuntimeID guid.GUID `json:"RuntimeId,omitempty"` IsRuntimeTemplate bool `json:",omitempty"` RuntimeImagePath string `json:",omitempty"` Stopped bool `json:",omitempty"` @@ -212,8 +214,11 @@ type MappedVirtualDiskController struct { // GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM type GuestDefinedCapabilities struct { - NamespaceAddRequestSupported bool `json:",omitempty"` - SignalProcessSupported bool `json:",omitempty"` + NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` + DumpStacksSupported bool `json:",omitempty"` + DeleteContainerStateSupported bool `json:",omitempty"` + UpdateContainerSupported bool `json:",omitempty"` } // GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go similarity index 78% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go index 09456cbc2..70884aad7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go @@ -10,7 +10,6 @@ package hcsschema type Attachment struct { - Type_ string `json:"Type,omitempty"` Path string `json:"Path,omitempty"` @@ -28,4 +27,10 @@ type Attachment struct { CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` ReadOnly bool `json:"ReadOnly,omitempty"` + + SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` + + AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` + + ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go index 243779eab..c1ea3953b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go @@ -10,7 +10,6 @@ package hcsschema type CacheQueryStatsResponse struct { - L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go similarity index 100% rename from 
vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go index 88f01707a..b4f9c315b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go @@ -10,6 +10,5 @@ package hcsschema type CloseHandle struct { - Handle string `json:"Handle,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go index c665be3d5..8bf8cab60 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go @@ -11,7 +11,6 @@ package hcsschema // ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. type ComPort struct { - NamedPipe string `json:"NamedPipe,omitempty"` OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go similarity index 91% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go index 85785d285..10cea67e0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go @@ -10,14 +10,13 @@ package hcsschema type ComputeSystem struct { - Owner string `json:"Owner,omitempty"` SchemaVersion *Version `json:"SchemaVersion,omitempty"` HostingSystemId string `json:"HostingSystemId,omitempty"` - HostedSystem *HostedSystem `json:"HostedSystem,omitempty"` + HostedSystem interface{} `json:"HostedSystem,omitempty"` Container *Container `json:"Container,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go similarity index 67% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go index 1a47db7d9..1d5dfe68a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go @@ -25,37 +25,37 @@ func (c contextKey) String() string { var ( // ContextOAuth2 takes a oauth2.TokenSource as authentication for the request. - ContextOAuth2 = contextKey("token") + ContextOAuth2 = contextKey("token") // ContextBasicAuth takes BasicAuth as authentication for the request. - ContextBasicAuth = contextKey("basic") + ContextBasicAuth = contextKey("basic") // ContextAccessToken takes a string oauth2 access token as authentication for the request. 
- ContextAccessToken = contextKey("accesstoken") + ContextAccessToken = contextKey("accesstoken") // ContextAPIKey takes an APIKey as authentication for the request - ContextAPIKey = contextKey("apikey") + ContextAPIKey = contextKey("apikey") ) -// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth type BasicAuth struct { - UserName string `json:"userName,omitempty"` - Password string `json:"password,omitempty"` + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` } // APIKey provides API key based authentication to a request passed via context using ContextAPIKey type APIKey struct { - Key string - Prefix string + Key string + Prefix string } type Configuration struct { - BasePath string `json:"basePath,omitempty"` - Host string `json:"host,omitempty"` - Scheme string `json:"scheme,omitempty"` - DefaultHeader map[string]string `json:"defaultHeader,omitempty"` - UserAgent string `json:"userAgent,omitempty"` - HTTPClient *http.Client + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client } func NewConfiguration() *Configuration { @@ -69,4 +69,4 @@ func NewConfiguration() *Configuration { func (c *Configuration) AddDefaultHeader(key string, value string) { c.DefaultHeader[key] = value -} \ No newline at end of file +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go index adbe07fe5..68aa04a57 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go @@ -10,7 +10,6 @@ package hcsschema type ConsoleSize struct { - Height int32 `json:"Height,omitempty"` Width int32 `json:"Width,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go similarity index 90% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go index 17dce28bc..39a54432c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -10,7 +10,6 @@ package hcsschema type Container struct { - GuestOs *GuestOs `json:"GuestOs,omitempty"` Storage *Storage `json:"Storage,omitempty"` @@ -32,4 +31,6 @@ type Container struct { RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go new file mode 100644 index 000000000..495c6ebc8 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardAddInstanceRequest struct { + Id string `json:"Id,omitempty"` + CredentialSpec string `json:"CredentialSpec,omitempty"` + Transport string `json:"Transport,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go new file mode 100644 index 000000000..1ed4c008f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardHvSocketServiceConfig struct { + ServiceId string `json:"ServiceId,omitempty"` + ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go new file mode 100644 index 000000000..d7ebd0fcc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardInstance struct { + Id string `json:"Id,omitempty"` + CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` + HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go new file mode 100644 index 000000000..71005b090 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardModifyOperation string + +const ( + AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" + RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go new file mode 100644 index 
000000000..952cda496 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardOperationRequest struct { + Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go similarity index 57% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go index ca319bbbc..32e5a3bee 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go @@ -3,14 +3,12 @@ * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema -type Device struct { - - // The interface class guid of the device to assign to container. - InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` +type ContainerCredentialGuardRemoveInstanceRequest struct { + Id string `json:"Id,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go new file mode 100644 index 000000000..ea306fa21 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardSystemInfo struct { + Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go index 754797e21..1fd7ca5d5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go @@ -11,7 +11,6 @@ package hcsschema // memory usage as viewed from within the container type ContainerMemoryInformation struct { - TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` TotalUsage int32 `json:"TotalUsage,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go new file mode 100644 index 000000000..90332a519 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines +type CpuGroup struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go new file mode 100644 index 000000000..8794961bf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupAffinity struct { + LogicalProcessorCount int32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go new file mode 100644 index 000000000..0be0475d4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupConfig struct { + GroupId string `json:"GroupId,omitempty"` + Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` + GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` + // Hypervisor CPU group IDs exposed to clients + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go new file mode 100644 index 000000000..3ace0ccc3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to return cpu groups for a Service property query +type CpuGroupConfigurations struct { + CpuGroups []CpuGroupConfig 
`json:"CpuGroups,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go new file mode 100644 index 000000000..7d8978070 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupOperation string + +const ( + CreateGroup CPUGroupOperation = "CreateGroup" + DeleteGroup CPUGroupOperation = "DeleteGroup" + SetProperty CPUGroupOperation = "SetProperty" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go new file mode 100644 index 000000000..bbad6a2c4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupProperty struct { + PropertyCode uint32 `json:"PropertyCode,omitempty"` + PropertyValue uint32 `json:"PropertyValue,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go new file mode 100644 index 000000000..91a8278fe --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Create group operation settings +type CreateGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go new file mode 100644 index 000000000..134bd9881 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Delete group operation settings +type DeleteGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go new file mode 100644 index 000000000..31c4538af --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger 
Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceType string + +const ( + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" +) + +type Device struct { + // The type of device to assign to the container. + Type DeviceType `json:"Type,omitempty"` + // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. + InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. + LocationPath string `json:"LocationPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go similarity index 86% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go index b2191c571..e985d96d2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go @@ -10,7 +10,6 @@ package hcsschema type Devices struct { - ComPorts map[string]ComPort `json:"ComPorts,omitempty"` Scsi map[string]Scsi `json:"Scsi,omitempty"` @@ -40,4 +39,8 @@ type Devices struct { FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` + + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. 
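The regenerated device.go above replaces the single InterfaceClassGuid field with a typed DeviceType (ClassGuid, DeviceInstance, GpuMirror) plus a LocationPath. A short sketch of the JSON such a Device produces; the types are redeclared here so the example compiles on its own, and the GUID is a placeholder:

package main

import (
	"encoding/json"
	"fmt"
)

// DeviceType and Device are redeclared from the schema above so this example
// is self-contained; in-tree code would use the hcsschema package.
type DeviceType string

const (
	ClassGUID        DeviceType = "ClassGuid"
	DeviceInstanceID DeviceType = "DeviceInstance"
	GPUMirror        DeviceType = "GpuMirror"
)

type Device struct {
	Type               DeviceType `json:"Type,omitempty"`
	InterfaceClassGuid string     `json:"InterfaceClassGuid,omitempty"`
	LocationPath       string     `json:"LocationPath,omitempty"`
}

func main() {
	// Placeholder interface-class GUID; a real caller would supply the GUID of
	// the device interface class to expose in the container.
	d := Device{Type: ClassGUID, InterfaceClassGuid: "00000000-0000-0000-0000-000000000000"}
	b, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}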
+ VirtualPci map[string]VirtualPciDevice `json:",omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go index 4fe592f71..85450c41e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go @@ -10,6 +10,5 @@ package hcsschema type EnhancedModeVideo struct { - ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go index 51011afe4..fe86cab65 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go @@ -10,7 +10,6 @@ package hcsschema type FlexibleIoDevice struct { - EmulatorId string `json:"EmulatorId,omitempty"` HostingModel string `json:"HostingModel,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go index c5fa76735..af8280048 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go @@ -10,6 +10,5 @@ package hcsschema type GuestCrashReporting struct { - WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go index c708fc7c3..8838519a3 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go @@ -10,6 +10,5 @@ package hcsschema type GuestOs 
struct { - HostName string `json:"HostName,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go new file mode 100644 index 000000000..2238ce530 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to request a service processor modification +type HostProcessorModificationRequest struct { + Operation CPUGroupOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go index 0797584c5..ea3084bca 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go @@ -10,7 +10,6 @@ package hcsschema type HostedSystem struct { - SchemaVersion *Version `json:"SchemaVersion,omitempty"` Container *Container `json:"Container,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go index ef9ffb8dd..23b2ee9e7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go @@ -10,7 +10,6 @@ package hcsschema type HvSocket struct { - Config *HvSocketSystemConfig `json:"Config,omitempty"` EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go index a19ba15c1..a017691f0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go @@ -11,6 +11,5 @@ package hcsschema // HvSocket configuration for a VM type HvSocket2 struct { - HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go new file 
mode 100644 index 000000000..84c11b93e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. +type HvSocketAddress struct { + LocalAddress string `json:"LocalAddress,omitempty"` + ParentAddress string `json:"ParentAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go similarity index 76% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go index a848e91e6..ecd9f7fba 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go @@ -19,4 +19,10 @@ type HvSocketServiceConfig struct { // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` + + // Disabled controls whether the HvSocket service is accepting connection requests. + // This set to true will make the service refuse all incoming connections as well as cancel + // any connections already established. The service itself will still be active however + // and can be re-enabled at a future time. + Disabled bool `json:"Disabled,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go new file mode 100644 index 000000000..a614d63bd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go @@ -0,0 +1,42 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterruptModerationName string + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
+const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +type InterruptModerationValue uint32 + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go new file mode 100644 index 000000000..2a55cc37c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IovSettings struct { + // The weight assigned to this port for I/O virtualization (IOV) offloading. + // Setting this to 0 disables IOV offloading. + OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` + + // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. + QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` + + // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
+ InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go index b63b8ef12..176c49d49 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -10,7 +10,6 @@ package hcsschema type Layer struct { - Id string `json:"Id,omitempty"` Path string `json:"Path,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go new file mode 100644 index 000000000..2e3aa5e17 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LogicalProcessor struct { + LpIndex uint32 `json:"LpIndex,omitempty"` + NodeNumber uint8 `json:"NodeNumber,omitempty"` + PackageId uint32 `json:"PackageId,omitempty"` + CoreId uint32 `json:"CoreId,omitempty"` + RootVpIndex int32 `json:"RootVpIndex,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go index a823a6d3b..9b86a4045 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go @@ -10,7 +10,6 @@ package hcsschema type MappedDirectory struct { - HostPath string `json:"HostPath,omitempty"` HostPathType string `json:"HostPathType,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go index 2d1d2604a..208074e9a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go @@ -10,7 +10,6 @@ package hcsschema type 
MappedPipe struct { - ContainerPipeName string `json:"ContainerPipeName,omitempty"` HostPath string `json:"HostPath,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go similarity index 86% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go index e1d135a3a..30749c672 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go @@ -10,6 +10,5 @@ package hcsschema type Memory struct { - - SizeInMB int32 `json:"SizeInMB,omitempty"` + SizeInMB uint64 `json:"SizeInMB,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go new file mode 100644 index 000000000..71224c75b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go @@ -0,0 +1,49 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. + EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` + + // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed + // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by + // the guest operating system). + EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + + // LowMmioGapInMB is the low MMIO region allocated below 4GB. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + + // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and + // size). + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + + // HighMmioGapInMB is the high MMIO region. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. 
+ HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go similarity index 87% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go index bdd87dffd..811779b04 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go @@ -10,8 +10,7 @@ package hcsschema type MemoryInformationForVm struct { - - VirtualNodeCount int32 `json:"VirtualNodeCount,omitempty"` + VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go similarity index 55% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go index 6214970f6..906ba597f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go @@ -11,10 +11,9 @@ package hcsschema // Memory runtime statistics type MemoryStats struct { + MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` - MemoryUsageCommitBytes int32 `json:"MemoryUsageCommitBytes,omitempty"` + MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` - MemoryUsageCommitPeakBytes int32 `json:"MemoryUsageCommitPeakBytes,omitempty"` - - MemoryUsagePrivateWorkingSetBytes int32 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` + MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 000000000..8dbe40b3b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go new file mode 100644 index 000000000..8fe89f927 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass 
`json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 000000000..a62568d89 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 000000000..a7410febd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 000000000..355364064 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 000000000..7be98b541 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 000000000..3ab9cf1ec --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 000000000..d2f51b3b5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 000000000..47dfb55bf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 000000000..8867ebe5f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go new file mode 100644 index 000000000..1384ed888 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger 
Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModificationRequest struct { + PropertyType PropertyType `json:"PropertyType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go similarity index 76% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go index c586f66c2..7408abd31 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go @@ -10,8 +10,8 @@ package hcsschema type NetworkAdapter struct { - EndpointId string `json:"EndpointId,omitempty"` - MacAddress string `json:"MacAddress,omitempty"` + // The I/O virtualization (IOV) offloading configuration. + IovSettings *IovSettings `json:"IovSettings,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go index 12c47827c..e5ea187a2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go @@ -10,7 +10,6 @@ package hcsschema type Networking struct { - AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` DnsSearchList string `json:"DnsSearchList,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go index 1cd70d179..d96c9501f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go @@ -11,6 +11,5 @@ package hcsschema // Notification data that is indicated to components running in the Virtual Machine. 
type PauseNotification struct { - Reason string `json:"Reason,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go index 780a5cae2..21707a88e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go @@ -11,7 +11,6 @@ package hcsschema // Options for HcsPauseComputeSystem type PauseOptions struct { - SuspensionLevel string `json:"SuspensionLevel,omitempty"` HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go index 705c677e1..29d8c8012 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go @@ -10,6 +10,5 @@ package hcsschema type Plan9 struct { - Shares []Plan9Share `json:"Shares,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go similarity index 94% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go index eb171817a..41f8fdea0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go @@ -10,7 +10,6 @@ package hcsschema type Plan9Share struct { - Name string `json:"Name,omitempty"` // The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol. 
@@ -30,4 +29,6 @@ type Plan9Share struct { ReadOnly bool `json:"ReadOnly,omitempty"` UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go index 63e0b7f8f..e9a662dd5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go @@ -15,7 +15,6 @@ import ( // Information about a process running in a container type ProcessDetails struct { - ProcessId int32 `json:"ProcessId,omitempty"` ImageName string `json:"ImageName,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go index 29bc2e3d0..e4ed095c7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go @@ -11,7 +11,6 @@ package hcsschema // Passed to HcsRpc_ModifyProcess type ProcessModifyRequest struct { - Operation string `json:"Operation,omitempty"` ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go index 470c55734..82b0d0532 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go @@ -10,7 +10,6 @@ package hcsschema type ProcessParameters struct { - ApplicationName string `json:"ApplicationName,omitempty"` CommandLine string `json:"CommandLine,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go index 20793d150..ad9a4fa9a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go @@ -11,7 +11,6 @@ package hcsschema // Status of a process running in a container type ProcessStatus struct { - ProcessId int32 `json:"ProcessId,omitempty"` Exited bool `json:"Exited,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go rename to 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go index 7a60b0245..bb24e88da 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go @@ -10,7 +10,6 @@ package hcsschema type Processor struct { - Count int32 `json:"Count,omitempty"` Maximum int32 `json:"Maximum,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go similarity index 73% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go index 40d3e7356..c64f335ec 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go @@ -3,14 +3,13 @@ * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.5 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema type Processor2 struct { - Count int32 `json:"Count,omitempty"` Limit int32 `json:"Limit,omitempty"` @@ -18,4 +17,7 @@ type Processor2 struct { Weight int32 `json:"Weight,omitempty"` ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` + + // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. + CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go similarity index 62% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go index 9d3b77e57..6157e2522 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go @@ -11,10 +11,9 @@ package hcsschema // CPU runtime statistics type ProcessorStats struct { + TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` - TotalRuntime100ns int32 `json:"TotalRuntime100ns,omitempty"` + RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` - RuntimeUser100ns int32 `json:"RuntimeUser100ns,omitempty"` - - RuntimeKernel100ns int32 `json:"RuntimeKernel100ns,omitempty"` + RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go new file mode 100644 index 000000000..885156e77 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessorTopology struct { + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go similarity index 83% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go index 6db2a48f6..17558cba0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -9,8 +9,11 @@ package hcsschema -type Properties struct { +import ( + v1 "github.com/containerd/cgroups/stats/v1" +) +type Properties struct { Id string `json:"Id,omitempty"` SystemType string `json:"SystemType,omitempty"` @@ -44,4 +47,8 @@ type Properties struct { SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` + + // Metrics is not part of the API for HCS but this is used for LCOW v2 to + // return the full cgroup metrics from the guest. + Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go similarity index 75% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go index 22b92ffdf..d6d80df13 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go @@ -9,8 +9,7 @@ package hcsschema -// By default the basic properties will be returned. This query provides a way to request specific properties. +// By default the basic properties will be returned. This query provides a way to request specific properties. type PropertyQuery struct { - - PropertyTypes []string `json:"PropertyTypes,omitempty"` + PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go new file mode 100644 index 000000000..98f2c96ed --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type PropertyType string + +const ( + PTMemory PropertyType = "Memory" + PTGuestMemory PropertyType = "GuestMemory" + PTStatistics PropertyType = "Statistics" + PTProcessList PropertyType = "ProcessList" + PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" + PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" + PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
+ PTGuestConnection PropertyType = "GuestConnection" + PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" + PTProcessorTopology PropertyType = "ProcessorTopology" + PTCPUGroup PropertyType = "CpuGroup" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go index 97e453128..8d5f5c171 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go @@ -10,7 +10,6 @@ package hcsschema type RdpConnectionOptions struct { - AccessSids []string `json:"AccessSids,omitempty"` NamedPipe string `json:"NamedPipe,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go index fa574ccc8..006906f6e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go @@ -10,7 +10,6 @@ package hcsschema type RegistryChanges struct { - AddValues []RegistryValue `json:"AddValues,omitempty"` DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go index fab03bc60..26fde99c7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -10,7 +10,6 @@ package hcsschema type RegistryKey struct { - Hive string `json:"Hive,omitempty"` Name string `json:"Name,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go index 1589f4841..3f203176c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -10,7 +10,6 @@ package hcsschema type RegistryValue struct { - Key *RegistryKey `json:"Key,omitempty"` Name string `json:"Name,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go 
similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go new file mode 100644 index 000000000..b8142ca6a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "encoding/json" + +type ServiceProperties struct { + // Changed Properties field to []json.RawMessage from []interface{} to avoid having to + // remarshal sp.Properties[n] and unmarshal into the type(s) we want. + Properties []json.RawMessage `json:"Properties,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go index bd573f6cd..df9baa921 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go @@ -10,6 +10,5 @@ package hcsschema type SharedMemoryConfiguration struct { - Regions []SharedMemoryRegion `json:"Regions,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go index a57b2cba7..825b71865 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go @@ -10,7 +10,6 @@ package hcsschema type SharedMemoryRegion struct { - SectionName string `json:"SectionName,omitempty"` StartOffset int32 `json:"StartOffset,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go index d9a50cc7d..f67b08eb5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go @@ -10,7 +10,6 @@ package hcsschema type 
SharedMemoryRegionInfo struct { - SectionName string `json:"SectionName,omitempty"` GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go index 599c06e8a..5eaf6a7f4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go @@ -11,7 +11,6 @@ package hcsschema // Silo job information type SiloProperties struct { - Enabled bool `json:"Enabled,omitempty"` JobName string `json:"JobName,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go similarity index 92% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go index 5cb3ed93b..ba7a6b396 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go @@ -15,12 +15,11 @@ import ( // Runtime statistics for a container type Statistics struct { - Timestamp time.Time `json:"Timestamp,omitempty"` ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` - Uptime100ns int32 `json:"Uptime100ns,omitempty"` + Uptime100ns uint64 `json:"Uptime100ns,omitempty"` Processor *ProcessorStats `json:"Processor,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go index 8c5255df1..9c5e6eb53 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go @@ -10,7 +10,6 @@ package hcsschema type StorageQoS struct { - IopsMaximum int32 `json:"IopsMaximum,omitempty"` BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go similarity index 56% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go index 198ea57d7..4f042ffd9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go @@ -11,12 +11,11 @@ package hcsschema // Storage runtime statistics type StorageStats struct { + ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` - ReadCountNormalized int32 `json:"ReadCountNormalized,omitempty"` + 
ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` - ReadSizeBytes int32 `json:"ReadSizeBytes,omitempty"` + WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` - WriteCountNormalized int32 `json:"WriteCountNormalized,omitempty"` - - WriteSizeBytes int32 `json:"WriteSizeBytes,omitempty"` + WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go index af2e3c823..834869940 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -10,7 +10,6 @@ package hcsschema type Topology struct { - Memory *Memory2 `json:"Memory,omitempty"` Processor *Processor2 `json:"Processor,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go index ba91178f9..0e48ece50 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go @@ -10,7 +10,6 @@ package hcsschema type Uefi struct { - EnableDebugger bool `json:"EnableDebugger,omitempty"` SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go index 6620fb2bc..3ab409d82 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go @@ -10,7 +10,6 @@ package hcsschema type UefiBootEntry struct { - DeviceType string `json:"DeviceType,omitempty"` DevicePath string `json:"DevicePath,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go index 62c0e4d12..2abfccca3 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go @@ -10,7 +10,6 @@ package hcsschema type Version struct { - Major int32 `json:"Major,omitempty"` Minor int32 `json:"Minor,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go index 0958e5606..ec5d0fb93 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go @@ -10,7 +10,6 @@ package hcsschema type VideoMonitor struct { - HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` VerticalResolution int32 `json:"VerticalResolution,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go index 48402d8ec..91a3c83d4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go @@ -10,7 +10,6 @@ package hcsschema type VirtualNodeInfo struct { - VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go similarity index 100% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go index 47714444a..70cf2d90d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go @@ -10,7 +10,6 @@ package hcsschema type VirtualPMemDevice struct { - HostPath string `json:"HostPath,omitempty"` ReadOnly bool `json:"ReadOnly,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 000000000..9ef322f61 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go new file mode 100644 index 000000000..f5e05903c --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciDevice struct { + Functions []VirtualPciFunction `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go new file mode 100644 index 000000000..cedb7d18b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciFunction struct { + DeviceInstancePath string `json:",omitempty"` + + VirtualFunction uint16 `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go index 76131b3a7..362df363e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go @@ -10,7 +10,6 @@ package hcsschema type VirtualSmb struct { - Shares []VirtualSmbShare `json:"Shares,omitempty"` DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go index b50098a42..915e9b638 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go @@ -10,7 +10,6 @@ package hcsschema type VirtualSmbShare struct { - Name string `json:"Name,omitempty"` Path string `json:"Path,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go index c1894279d..75196bd8c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go @@ -10,7 +10,6 @@ package hcsschema type VirtualSmbShareOptions struct { - ReadOnly bool 
`json:"ReadOnly,omitempty"` // convert exclusive access to shared read access diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go similarity index 83% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go index 39f628667..8e1836dd6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go @@ -10,14 +10,13 @@ package hcsschema type VmMemory struct { - AvailableMemory int32 `json:"AvailableMemory,omitempty"` AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` - ReservedMemory int32 `json:"ReservedMemory,omitempty"` + ReservedMemory uint64 `json:"ReservedMemory,omitempty"` - AssignedMemory int32 `json:"AssignedMemory,omitempty"` + AssignedMemory uint64 `json:"AssignedMemory,omitempty"` SlpActive bool `json:"SlpActive,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go new file mode 100644 index 000000000..de1b9cf1a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. +type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 `json:"Limit,omitempty"` + // Value describing the relative priority of this virtual machine compared to other virtual machines. + Weight uint64 `json:"Weight,omitempty"` + // Minimum amount of host CPU resources that the virtual machine is guaranteed. + Reservation uint64 `json:"Reservation,omitempty"` + // Provides the target maximum CPU frequency, in MHz, for a virtual machine. 
+ MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go index cf632bbc8..8ed7e566d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go @@ -10,7 +10,6 @@ package hcsschema type WindowsCrashReporting struct { - DumpFileName string `json:"DumpFileName,omitempty"` MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go new file mode 100644 index 000000000..a634dfc15 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -0,0 +1,49 @@ +package hcs + +import ( + "context" + "encoding/json" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vmcompute" +) + +// GetServiceProperties returns properties of the host compute service. +func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { + operation := "hcs::GetServiceProperties" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.ServiceProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, err + } + return properties, nil +} + +// ModifyServiceSettings modifies settings of the host compute service. 
+func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { + operation := "hcs::ModifyServiceSettings" + + settingsJSON, err := json.Marshal(settings) + if err != nil { + return err + } + resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return &HcsError{Op: operation, Err: err, Events: events} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 20b242524..75499c967 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -1,86 +1,55 @@ package hcs import ( + "context" "encoding/json" - "os" - "strconv" + "errors" + "strings" "sync" "syscall" - "time" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/internal/cow" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "go.opencensus.io/trace" ) -// currentContainerStarts is used to limit the number of concurrent container -// starts. -var currentContainerStarts containerStarts - -type containerStarts struct { - maxParallel int - inProgress int - sync.Mutex -} - -func init() { - mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START") - if len(mpsS) > 0 { - mpsI, err := strconv.Atoi(mpsS) - if err != nil || mpsI < 0 { - return - } - currentContainerStarts.maxParallel = mpsI - } -} - type System struct { handleLock sync.RWMutex - handle hcsSystem + handle vmcompute.HcsSystem id string callbackNumber uintptr - logctx logrus.Fields + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error + exitError error + os, typ string } func newSystem(id string) *System { return &System{ - id: id, - logctx: logrus.Fields{ - logfields.ContainerID: id, - }, + id: id, + waitBlock: make(chan struct{}), } } -func (computeSystem *System) logOperationBegin(operation string) { - logOperationBegin( - computeSystem.logctx, - operation+" - Begin Operation") -} - -func (computeSystem *System) logOperationEnd(operation string, err error) { - var result string - if err == nil { - result = "Success" - } else { - result = "Error" - } - - logOperationEnd( - computeSystem.logctx, - operation+" - End Operation - "+result, - err) -} - // CreateComputeSystem creates a new compute system with the given configuration but does not start it. -func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) { - operation := "hcsshim::CreateComputeSystem" +func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcs::CreateComputeSystem" + + // hcsCreateComputeSystemContext is an async operation. Start the outer span + // here to measure the full create time. 
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", id)) computeSystem := newSystem(id) - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() hcsDocumentB, err := json.Marshal(hcsDocumentInterface) if err != nil { @@ -89,126 +58,114 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System hcsDocument := string(hcsDocumentB) - logrus.WithFields(computeSystem.logctx). - WithField(logfields.JSON, hcsDocument). - Debug("HCS ComputeSystem Document") - var ( - resultp *uint16 identity syscall.Handle + resultJSON string createError error ) - syscallWatcher(computeSystem.logctx, func() { - createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp) - }) - + computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) if createError == nil || IsPending(createError) { - if err = computeSystem.registerCallback(); err != nil { + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. - computeSystem.Terminate() - return nil, makeSystemError(computeSystem, operation, "", err, nil) + _ = computeSystem.Terminate(ctx) + return nil, makeSystemError(computeSystem, operation, err, nil) } } - events, err := processAsyncHcsResult(createError, resultp, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) if err != nil { if err == ErrTimeout { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. - computeSystem.Terminate() + _ = computeSystem.Terminate(ctx) } - return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events) + return nil, makeSystemError(computeSystem, operation, err, events) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err } - return computeSystem, nil } // OpenComputeSystem opens an existing compute system by ID. 
-func OpenComputeSystem(id string) (_ *System, err error) { - operation := "hcsshim::OpenComputeSystem" +func OpenComputeSystem(ctx context.Context, id string) (*System, error) { + operation := "hcs::OpenComputeSystem" computeSystem := newSystem(id) - computeSystem.logOperationBegin(operation) + handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + computeSystem.handle = handle defer func() { - if IsNotExist(err) { - computeSystem.logOperationEnd(operation, nil) - } else { - computeSystem.logOperationEnd(operation, err) + if err != nil { + computeSystem.Close() } }() + if err = computeSystem.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} - var ( - handle hcsSystem - resultp *uint16 - ) - err = hcsOpenComputeSystem(id, &handle, &resultp) - events := processHcsResult(resultp) +func (computeSystem *System) getCachedProperties(ctx context.Context) error { + props, err := computeSystem.Properties(ctx) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, events) + return err } - - computeSystem.handle = handle - - if err = computeSystem.registerCallback(); err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + computeSystem.typ = strings.ToLower(props.SystemType) + computeSystem.os = strings.ToLower(props.RuntimeOSType) + if computeSystem.os == "" && computeSystem.typ == "container" { + // Pre-RS5 HCS did not return the OS, but it only supported containers + // that ran Windows. + computeSystem.os = "windows" } - - return computeSystem, nil + return nil } -// GetComputeSystems gets a list of the compute systems on the system that match the query -func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) { - operation := "hcsshim::GetComputeSystems" - fields := logrus.Fields{} - logOperationBegin( - fields, - operation+" - Begin Operation") +// OS returns the operating system of the compute system, "linux" or "windows". +func (computeSystem *System) OS() string { + return computeSystem.os +} - defer func() { - var result string - if err == nil { - result = "Success" - } else { - result = "Error" - } +// IsOCI returns whether processes in the compute system should be created via +// OCI. +func (computeSystem *System) IsOCI() bool { + return computeSystem.os == "linux" && computeSystem.typ == "container" +} - logOperationEnd( - fields, - operation+" - End Operation - "+result, - err) - }() +// GetComputeSystems gets a list of the compute systems on the system that match the query +func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { + operation := "hcs::GetComputeSystems" queryb, err := json.Marshal(q) if err != nil { return nil, err } - query := string(queryb) - - logrus.WithFields(fields). - WithField(logfields.JSON, query). 
- Debug("HCS ComputeSystem Query") - - var ( - resultp *uint16 - computeSystemsp *uint16 - ) - - syscallWatcher(fields, func() { - err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) - }) - events := processHcsResult(resultp) + computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) if err != nil { return nil, &HcsError{Op: operation, Err: err, Events: events} } - if computeSystemsp == nil { + if computeSystemsJSON == "" { return nil, ErrUnexpectedValue } - computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp) computeSystems := []schema1.ContainerProperties{} - if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil { + if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { return nil, err } @@ -216,51 +173,27 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope } // Start synchronously starts the computeSystem. -func (computeSystem *System) Start() (err error) { +func (computeSystem *System) Start(ctx context.Context) (err error) { + operation := "hcs::System::Start" + + // hcsStartComputeSystemContext is an async operation. Start the outer span + // here to measure the full start time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Start" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil) - } - - // This is a very simple backoff-retry loop to limit the number - // of parallel container starts if environment variable - // HCSSHIM_MAX_PARALLEL_START is set to a positive integer. - // It should generally only be used as a workaround to various - // platform issues that exist between RS1 and RS4 as of Aug 2018 - if currentContainerStarts.maxParallel > 0 { - for { - currentContainerStarts.Lock() - if currentContainerStarts.inProgress < currentContainerStarts.maxParallel { - currentContainerStarts.inProgress++ - currentContainerStarts.Unlock() - break - } - if currentContainerStarts.inProgress == currentContainerStarts.maxParallel { - currentContainerStarts.Unlock() - time.Sleep(100 * time.Millisecond) - } - } - // Make sure we decrement the count when we are done. 
- defer func() { - currentContainerStarts.Lock() - currentContainerStarts.inProgress-- - currentContainerStarts.Unlock() - }() + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } - var resultp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsStartComputeSystem(computeSystem.handle, "", &resultp) - }) - events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) + resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) if err != nil { - return makeSystemError(computeSystem, "Start", "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil @@ -271,360 +204,389 @@ func (computeSystem *System) ID() string { return computeSystem.id } -// Shutdown requests a compute system shutdown, if IsPending() on the error returned is true, -// it may not actually be shut down until Wait() succeeds. -func (computeSystem *System) Shutdown() (err error) { +// Shutdown requests a compute system shutdown. +func (computeSystem *System) Shutdown(ctx context.Context) error { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Shutdown" - computeSystem.logOperationBegin(operation) - defer func() { - if IsAlreadyStopped(err) { - computeSystem.logOperationEnd(operation, nil) - } else { - computeSystem.logOperationEnd(operation, err) - } - }() + operation := "hcs::System::Shutdown" if computeSystem.handle == 0 { - return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil) + return nil } - var resultp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp) - }) - events := processHcsResult(resultp) - if err != nil { - return makeSystemError(computeSystem, "Shutdown", "", err, events) + resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) } - return nil } -// Terminate requests a compute system terminate, if IsPending() on the error returned is true, -// it may not actually be shut down until Wait() succeeds. -func (computeSystem *System) Terminate() (err error) { +// Terminate requests a compute system terminate. 
+func (computeSystem *System) Terminate(ctx context.Context) error { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Terminate" - computeSystem.logOperationBegin(operation) - defer func() { - if IsPending(err) { - computeSystem.logOperationEnd(operation, nil) - } else { - computeSystem.logOperationEnd(operation, err) - } - }() + operation := "hcs::System::Terminate" if computeSystem.handle == 0 { - return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil) + return nil } - var resultp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp) - }) - events := processHcsResult(resultp) - if err != nil && err != ErrVmcomputeAlreadyStopped { - return makeSystemError(computeSystem, "Terminate", "", err, events) + resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) } - return nil } -// Wait synchronously waits for the compute system to shutdown or terminate. -func (computeSystem *System) Wait() (err error) { - operation := "hcsshim::ComputeSystem::Wait" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() - - err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - if err != nil { - return makeSystemError(computeSystem, "Wait", "", err, nil) - } - - return nil +// waitBackground waits for the compute system exit notification. Once received +// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `computeSystem.handle` but `Wait` is +// safe to call multiple times. +func (computeSystem *System) waitBackground() { + operation := "hcs::System::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + switch err { + case nil: + log.G(ctx).Debug("system exited") + case ErrVmcomputeUnexpectedExit: + log.G(ctx).Debug("unexpected system exit") + computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) + err = nil + default: + err = makeSystemError(computeSystem, operation, err, nil) + } + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = err + close(computeSystem.waitBlock) + }) + oc.SetSpanStatus(span, err) } -// WaitExpectedError synchronously waits for the compute system to shutdown or -// terminate, and ignores the passed error if it occurs. -func (computeSystem *System) WaitExpectedError(expected error) (err error) { - operation := "hcsshim::ComputeSystem::WaitExpectedError" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() +// Wait synchronously waits for the compute system to shutdown or terminate. If +// the compute system has already exited returns the previous error (if any). 
+func (computeSystem *System) Wait() error { + <-computeSystem.waitBlock + return computeSystem.waitError +} - err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - if err != nil && getInnerError(err) != expected { - return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil) +// ExitError returns an error describing the reason the compute system terminated. +func (computeSystem *System) ExitError() error { + select { + case <-computeSystem.waitBlock: + if computeSystem.waitError != nil { + return computeSystem.waitError + } + return computeSystem.exitError + default: + return errors.New("container not exited") } - - return nil } -// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse. -// If the timeout expires, IsTimeout(err) == true -func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) { - operation := "hcsshim::ComputeSystem::WaitTimeout" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() +// Properties returns the requested container properties targeting a V1 schema container. +func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Properties" - err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout) + queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) if err != nil { - return makeSystemError(computeSystem, "WaitTimeout", "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } - return nil + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &schema1.ContainerProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return properties, nil } -func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) { +// PropertiesV2 returns the requested container properties targeting a V2 schema container. +func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Properties" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() + operation := "hcs::System::PropertiesV2" - queryj, err := json.Marshal(schema1.PropertyQuery{types}) + queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) if err != nil { - return nil, makeSystemError(computeSystem, "Properties", "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } - logrus.WithFields(computeSystem.logctx). - WithField(logfields.JSON, queryj). 
- Debug("HCS ComputeSystem Properties Query") - - var resultp, propertiesp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp) - }) - events := processHcsResult(resultp) + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, makeSystemError(computeSystem, "Properties", "", err, events) + return nil, makeSystemError(computeSystem, operation, err, events) } - if propertiesp == nil { + if propertiesJSON == "" { return nil, ErrUnexpectedValue } - propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp) - properties := &schema1.ContainerProperties{} - if err := json.Unmarshal(propertiesRaw, properties); err != nil { - return nil, makeSystemError(computeSystem, "Properties", "", err, nil) + properties := &hcsschema.Properties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) } return properties, nil } // Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Pause() (err error) { +func (computeSystem *System) Pause(ctx context.Context) (err error) { + operation := "hcs::System::Pause" + + // hcsPauseComputeSystemContext is an async peration. Start the outer span + // here to measure the full pause time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Pause" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil) + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } - var resultp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp) - }) - events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) + resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) if err != nil { - return makeSystemError(computeSystem, "Pause", "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil } // Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Resume() (err error) { +func (computeSystem *System) Resume(ctx context.Context) (err error) { + operation := "hcs::System::Resume" + + // hcsResumeComputeSystemContext is an async operation. Start the outer span + // here to measure the full restore time. 
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Resume" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil) + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } - var resultp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp) - }) - events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) + resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) if err != nil { - return makeSystemError(computeSystem, "Resume", "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil } -// CreateProcess launches a new process within the computeSystem. -func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) { +// Save the compute system +func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { + operation := "hcs::System::Save" + + // hcsSaveComputeSystemContext is an async operation. Start the outer span + // here to measure the full save time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + saveOptions, err := json.Marshal(options) + if err != nil { + return err + } + computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::CreateProcess" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } - var ( - processInfo hcsProcessInformation - processHandle hcsProcess - resultp *uint16 - ) + result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) + events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() if computeSystem.handle == 0 { - return nil, makeSystemError(computeSystem, "CreateProcess", "", ErrAlreadyClosed, nil) + return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } configurationb, err := json.Marshal(c) if err != nil { - return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil) + return nil, nil, makeSystemError(computeSystem, operation, err, nil) } configuration := string(configurationb) + processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx,
computeSystem.handle, configuration) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, nil, makeSystemError(computeSystem, operation, err, events) + } - logrus.WithFields(computeSystem.logctx). - WithField(logfields.JSON, configuration). - Debug("HCS ComputeSystem Process Document") + log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") + return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil +} - syscallWatcher(computeSystem.logctx, func() { - err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp) - }) - events := processHcsResult(resultp) +// CreateProcess launches a new process within the computeSystem. +func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { + operation := "hcs::System::CreateProcess" + process, processInfo, err := computeSystem.createProcess(ctx, operation, c) if err != nil { - return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events) + return nil, err } + defer func() { + if err != nil { + process.Close() + } + }() - logrus.WithFields(computeSystem.logctx). - WithField(logfields.ProcessID, processInfo.ProcessId). - Debug("HCS ComputeSystem CreateProcess PID") - - process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem) - process.cachedPipes = &cachedPipes{ - stdIn: processInfo.StdInput, - stdOut: processInfo.StdOutput, - stdErr: processInfo.StdError, + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) } + process.stdin = pipes[0] + process.stdout = pipes[1] + process.stderr = pipes[2] + process.hasCachedStdio = true - if err = process.registerCallback(); err != nil { - return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil) + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) } + go process.waitBackground() return process, nil } // OpenProcess gets an interface to an existing process within the computeSystem. 
-func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) { +func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - // Add PID for the context of this operation - computeSystem.logctx[logfields.ProcessID] = pid - defer delete(computeSystem.logctx, logfields.ProcessID) - - operation := "hcsshim::ComputeSystem::OpenProcess" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() - - var ( - processHandle hcsProcess - resultp *uint16 - ) + operation := "hcs::System::OpenProcess" if computeSystem.handle == 0 { - return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil) + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } - syscallWatcher(computeSystem.logctx, func() { - err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp) - }) - events := processHcsResult(resultp) + processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) + events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events) + return nil, makeSystemError(computeSystem, operation, err, events) } process := newProcess(processHandle, pid, computeSystem) - if err = process.registerCallback(); err != nil { - return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil) + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) } + go process.waitBackground() return process, nil } // Close cleans up any state associated with the compute system but does not terminate or wait for it. 
func (computeSystem *System) Close() (err error) { + operation := "hcs::System::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + computeSystem.handleLock.Lock() defer computeSystem.handleLock.Unlock() - operation := "hcsshim::ComputeSystem::Close" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() - // Don't double free this if computeSystem.handle == 0 { return nil } - if err = computeSystem.unregisterCallback(); err != nil { - return makeSystemError(computeSystem, "Close", "", err, nil) + if err = computeSystem.unregisterCallback(ctx); err != nil { + return makeSystemError(computeSystem, operation, err, nil) } - syscallWatcher(computeSystem.logctx, func() { - err = hcsCloseComputeSystem(computeSystem.handle) - }) + err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) if err != nil { - return makeSystemError(computeSystem, "Close", "", err, nil) + return makeSystemError(computeSystem, operation, err, nil) } computeSystem.handle = 0 + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = ErrAlreadyClosed + close(computeSystem.waitBlock) + }) return nil } -func (computeSystem *System) registerCallback() error { - context := &notifcationWatcherContext{ - channels: newChannels(), +func (computeSystem *System) registerCallback(ctx context.Context) error { + callbackContext := &notificationWatcherContext{ + channels: newSystemChannels(), + systemID: computeSystem.id, } callbackMapLock.Lock() callbackNumber := nextCallback nextCallback++ - callbackMap[callbackNumber] = context + callbackMap[callbackNumber] = callbackContext callbackMapLock.Unlock() - var callbackHandle hcsCallback - err := hcsRegisterComputeSystemCallback(computeSystem.handle, notificationWatcherCallback, callbackNumber, &callbackHandle) + callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) if err != nil { return err } - context.handle = callbackHandle + callbackContext.handle = callbackHandle computeSystem.callbackNumber = callbackNumber return nil } -func (computeSystem *System) unregisterCallback() error { +func (computeSystem *System) unregisterCallback(ctx context.Context) error { callbackNumber := computeSystem.callbackNumber callbackMapLock.RLock() - context := callbackMap[callbackNumber] + callbackContext := callbackMap[callbackNumber] callbackMapLock.RUnlock() - if context == nil { + if callbackContext == nil { return nil } - handle := context.handle + handle := callbackContext.handle if handle == 0 { return nil @@ -632,53 +594,43 @@ func (computeSystem *System) unregisterCallback() error { // hcsUnregisterComputeSystemCallback has its own syncronization // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
- err := hcsUnregisterComputeSystemCallback(handle) + err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) if err != nil { return err } - closeChannels(context.channels) + closeChannels(callbackContext.channels) callbackMapLock.Lock() - callbackMap[callbackNumber] = nil + delete(callbackMap, callbackNumber) callbackMapLock.Unlock() - handle = 0 + handle = 0 //nolint:ineffassign return nil } // Modify the System by sending a request to HCS -func (computeSystem *System) Modify(config interface{}) (err error) { +func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - operation := "hcsshim::ComputeSystem::Modify" - computeSystem.logOperationBegin(operation) - defer func() { computeSystem.logOperationEnd(operation, err) }() + operation := "hcs::System::Modify" if computeSystem.handle == 0 { - return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil) + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } - requestJSON, err := json.Marshal(config) + requestBytes, err := json.Marshal(config) if err != nil { return err } - requestString := string(requestJSON) - - logrus.WithFields(computeSystem.logctx). - WithField(logfields.JSON, requestString). - Debug("HCS ComputeSystem Modify Document") - - var resultp *uint16 - syscallWatcher(computeSystem.logctx, func() { - err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp) - }) - events := processHcsResult(resultp) + requestJSON := string(requestBytes) + resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) + events := processHcsResult(ctx, resultJSON) if err != nil { - return makeSystemError(computeSystem, "Modify", requestString, err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go index a638677ed..3342e5bb9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -1,10 +1,15 @@ package hcs import ( + "context" "io" "syscall" "github.com/Microsoft/go-winio" + diskutil "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/computestorage" + "github.com/pkg/errors" + "golang.org/x/sys/windows" ) // makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles @@ -31,3 +36,27 @@ func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { } return fs, nil } + +// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { + if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { + return errors.Wrap(err, "failed to create VHD") + } + + vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) + if err != nil { + return errors.Wrap(err, "failed to open VHD") + } + defer func() { + err2 := windows.CloseHandle(windows.Handle(vhd)) + if err == nil { + err = errors.Wrap(err2, "failed to close VHD") + } + }() + + if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { + return errors.Wrap(err, "failed to format VHD") + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go index 91e212c57..db4e14fdf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -1,28 +1,34 @@ package hcs import ( + "context" "time" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/log" ) -func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { - events := processHcsResult(resultp) +func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { + events := processHcsResult(ctx, resultJSON) if IsPending(err) { - return nil, waitForNotification(callbackNumber, expectedNotification, timeout) + return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) } return events, err } -func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { +func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { callbackMapLock.RLock() + if _, ok := callbackMap[callbackNumber]; !ok { + callbackMapLock.RUnlock() + log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") + return ErrHandleClose + } channels := callbackMap[callbackNumber].channels callbackMapLock.RUnlock() expectedChannel := channels[expectedNotification] if expectedChannel == nil { - logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification) + log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") return ErrInvalidNotificationType } @@ -59,5 +65,4 @@ func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotific case <-c: return ErrTimeout } - return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go deleted file mode 100644 index f85ed3187..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go +++ /dev/null @@ -1,41 +0,0 @@ -package hcs - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/sirupsen/logrus" -) - -// syscallWatcher is used as a very simple goroutine around calls into -// the platform. 
In some cases, we have seen HCS APIs not returning due to -// various bugs, and the goroutine making the syscall ends up not returning, -// prior to its async callback. By spinning up a syscallWatcher, it allows -// us to at least log a warning if a syscall doesn't complete in a reasonable -// amount of time. -// -// Usage is: -// -// syscallWatcher(logContext, func() { -// err = (args...) -// }) -// - -func syscallWatcher(logContext logrus.Fields, syscallLambda func()) { - ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher) - defer cancel() - go watchFunc(ctx, logContext) - syscallLambda() -} - -func watchFunc(ctx context.Context, logContext logrus.Fields) { - select { - case <-ctx.Done(): - if ctx.Err() != context.Canceled { - logrus.WithFields(logContext). - WithField(logfields.Timeout, timeout.SyscallWatcher). - Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") - } - } -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index 59ec7004c..7cf954c7b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -3,6 +3,7 @@ package hns import ( "encoding/json" "net" + "strings" "github.com/sirupsen/logrus" ) @@ -16,16 +17,21 @@ type HNSEndpoint struct { Policies []json.RawMessage `json:",omitempty"` MacAddress string `json:",omitempty"` IPAddress net.IP `json:",omitempty"` + IPv6Address net.IP `json:",omitempty"` DNSSuffix string `json:",omitempty"` DNSServerList string `json:",omitempty"` + DNSDomain string `json:",omitempty"` GatewayAddress string `json:",omitempty"` + GatewayAddressV6 string `json:",omitempty"` EnableInternalDNS bool `json:",omitempty"` DisableICC bool `json:",omitempty"` PrefixLength uint8 `json:",omitempty"` + IPv6PrefixLength uint8 `json:",omitempty"` IsRemoteEndpoint bool `json:",omitempty"` EnableLowMetric bool `json:",omitempty"` Namespace *Namespace `json:",omitempty"` EncapOverhead uint16 `json:",omitempty"` + SharedContainers []string `json:",omitempty"` } //SystemType represents the type of the system on which actions are done @@ -53,6 +59,18 @@ type EndpointResquestResponse struct { Error string } +// EndpointStats is the object that has stats for a given endpoint +type EndpointStats struct { + BytesReceived uint64 `json:"BytesReceived"` + BytesSent uint64 `json:"BytesSent"` + DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` + DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` + EndpointID string `json:"EndpointId"` + InstanceID string `json:"InstanceId"` + PacketsReceived uint64 `json:"PacketsReceived"` + PacketsSent uint64 `json:"PacketsSent"` +} + // HNSEndpointRequest makes a HNS call to modify/query a network endpoint func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { endpoint := &HNSEndpoint{} @@ -75,11 +93,27 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) { return endpoint, nil } +// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID +func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { + var stats EndpointStats + err := hnsCall("GET", "/endpointstats/"+id, "", &stats) + if err != nil { + return nil, err + } + + return &stats, nil +} + // 
GetHNSEndpointByID get the Endpoint by ID func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { return HNSEndpointRequest("GET", endpointID, "") } +// GetHNSEndpointStats gets the stats for an Endpoint by ID +func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { + return hnsEndpointStatsRequest(endpointID) +} + // GetHNSEndpointByName gets the endpoint filtered by Name func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { hnsResponse, err := HNSListEndpointRequest() @@ -94,6 +128,27 @@ func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { return nil, EndpointNotFoundError{EndpointName: endpointName} } +type endpointAttachInfo struct { + SharedContainers json.RawMessage `json:",omitempty"` +} + +func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { + attachInfo := endpointAttachInfo{} + err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo) + + // Return false allows us to just return the err + if err != nil { + return false, err + } + + if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) { + return true, nil + } + + return false, nil + +} + // Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { operation := "Create" @@ -151,6 +206,27 @@ func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { return err } +// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { + operation := "ApplyProxyPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + // ContainerAttach attaches an endpoint to container func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { operation := "ContainerAttach" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go index 969d1b263..2df4a57f5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -9,23 +9,30 @@ import ( "github.com/sirupsen/logrus" ) -func hnsCall(method, path, request string, returnResponse interface{}) error { +func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { var responseBuffer *uint16 logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) err := _hnsCall(method, path, request, &responseBuffer) if err != nil { - return hcserror.New(err, "hnsCall ", "") + return nil, hcserror.New(err, "hnsCall ", "") } response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) hnsresponse := &hnsResponse{} if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return err + return nil, err } + return hnsresponse, nil +} +func hnsCall(method, path, request string, returnResponse interface{}) error { + hnsresponse, err := hnsCallRawResponse(method, path, request) + if err != nil { + return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + } if !hnsresponse.Success { - return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
+ return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) } if len(hnsresponse.Output) == 0 { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go index 7e859de91..f12d3ab04 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -2,9 +2,9 @@ package hns import ( "encoding/json" - "net" - + "errors" "github.com/sirupsen/logrus" + "net" ) // Subnet is assoicated with a network and represents a list @@ -39,12 +39,6 @@ type HNSNetwork struct { AutomaticDNS bool `json:",omitempty"` } -type hnsNetworkResponse struct { - Success bool - Error string - Output HNSNetwork -} - type hnsResponse struct { Success bool Error string @@ -98,6 +92,12 @@ func (network *HNSNetwork) Create() (*HNSNetwork, error) { title := "hcsshim::HNSNetwork::" + operation logrus.Debugf(title+" id=%s", network.Id) + for _, subnet := range network.Subnets { + if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + } + jsonString, err := json.Marshal(network) if err != nil { return nil, err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go index 2318a4fce..591a2631e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -17,13 +17,14 @@ const ( OutboundNat PolicyType = "OutBoundNAT" ExternalLoadBalancer PolicyType = "ELB" Route PolicyType = "ROUTE" + Proxy PolicyType = "PROXY" ) type NatPolicy struct { Type PolicyType `json:"Type"` - Protocol string - InternalPort uint16 - ExternalPort uint16 + Protocol string `json:",omitempty"` + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` } type QosPolicy struct { @@ -55,8 +56,18 @@ type PaPolicy struct { type OutboundNatPolicy struct { Policy - VIP string `json:"VIP,omitempty"` - Exceptions []string `json:"ExceptionList,omitempty"` + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` + Destinations []string `json:",omitempty"` +} + +type ProxyPolicy struct { + Type PolicyType `json:"Type"` + IP string `json:",omitempty"` + Port string `json:",omitempty"` + ExceptionList []string `json:",omitempty"` + Destination string `json:",omitempty"` + OutboundNat bool `json:",omitempty"` } type ActionType string @@ -77,20 +88,20 @@ const ( type ACLPolicy struct { Type PolicyType `json:"Type"` Id string `json:"Id,omitempty"` - Protocol uint16 - Protocols string `json:"Protocols,omitempty"` - InternalPort uint16 + Protocol uint16 `json:",omitempty"` + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 `json:",omitempty"` Action ActionType Direction DirectionType - LocalAddresses string - RemoteAddresses string - LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 - ServiceName string + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType 
`json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` } type Policy struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go index 45e2281b0..d3b04eefe 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -27,9 +27,10 @@ type namespaceResourceRequest struct { } type Namespace struct { - ID string - IsDefault bool `json:",omitempty"` - ResourceList []NamespaceResource `json:",omitempty"` + ID string + IsDefault bool `json:",omitempty"` + ResourceList []NamespaceResource `json:",omitempty"` + CompartmentId uint32 `json:",omitempty"` } func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go index 2f6ec029e..922f7c679 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -15,10 +15,6 @@ func ConvertAndFreeCoTaskMemString(buffer *uint16) string { return str } -func ConvertAndFreeCoTaskMemBytes(buffer *uint16) []byte { - return []byte(ConvertAndFreeCoTaskMemString(buffer)) -} - func Win32FromHresult(hr uintptr) syscall.Errno { if hr&0x1fff0000 == 0x00070000 { return syscall.Errno(hr & 0xffff) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go new file mode 100644 index 000000000..ba6b1a4a5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` +// contains an OpenCensus `trace.Span`. +func G(ctx context.Context) *logrus.Entry { + span := trace.FromContext(ctx) + if span != nil { + sctx := span.SpanContext() + return logrus.WithFields(logrus.Fields{ + "traceID": sctx.TraceID.String(), + "spanID": sctx.SpanID.String(), + // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? + }) + } + return logrus.NewEntry(logrus.StandardLogger()) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go new file mode 100644 index 000000000..f428bdaf7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -0,0 +1,43 @@ +package oc + +import ( + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +var _ = (trace.Exporter)(&LogrusExporter{}) + +// LogrusExporter is an OpenCensus `trace.Exporter` that exports +// `trace.SpanData` to logrus output. +type LogrusExporter struct { +} + +// ExportSpan exports `s` based on the the following rules: +// +// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, +// `s.ParentSpanID` for correlation +// +// 2. Any calls to .Annotate will not be supported. +// +// 3. The span itself will be written at `logrus.InfoLevel` unless +// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` +// providing `s.Status.Message` as the error value. 
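For reference, a minimal sketch of how an exporter like this is typically wired up by a consumer, together with SetSpanStatus from span.go below. The span name, attribute, and sampler choice are illustrative only, and the snippet is written as if it lived inside the hcsshim module, since internal packages cannot be imported from outside it:

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

func main() {
	// Route every exported span to logrus via the LogrusExporter.
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	_, span := trace.StartSpan(context.Background(), "example::Startup")
	span.AddAttributes(trace.StringAttribute("path", `C:\example`))
	// ... do work; a non-nil error passed to SetSpanStatus is then logged
	// at logrus.ErrorLevel when the span is exported.
	oc.SetSpanStatus(span, nil)
	span.End()
}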
+func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { + // Combine all span annotations with traceID, spanID, parentSpanID + baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) + baseEntry.Data["traceID"] = s.TraceID.String() + baseEntry.Data["spanID"] = s.SpanID.String() + baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() + baseEntry.Data["startTime"] = s.StartTime + baseEntry.Data["endTime"] = s.EndTime + baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() + baseEntry.Data["name"] = s.Name + baseEntry.Time = s.StartTime + + level := logrus.InfoLevel + if s.Status.Code != 0 { + level = logrus.ErrorLevel + baseEntry.Data[logrus.ErrorKey] = s.Status.Message + } + baseEntry.Log(level, "Span") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go new file mode 100644 index 000000000..fee4765cb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -0,0 +1,17 @@ +package oc + +import ( + "go.opencensus.io/trace" +) + +// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If +// `err` is `nil` assumes `trace.StatusCodeOk`. +func SetSpanStatus(span *trace.Span, err error) { + status := trace.Status{} + if err != nil { + // TODO: JTERRY75 - Handle errors in a non-generic way + status.Code = trace.StatusCodeUnknown + status.Message = err.Error() + } + span.SetStatus(status) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go index 6c4a64156..dcbc9334d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go @@ -9,6 +9,7 @@ import ( "reflect" "syscall" + "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" ) @@ -19,7 +20,6 @@ import ( const ( _REG_OPTION_VOLATILE = 1 - _REG_CREATED_NEW_KEY = 1 _REG_OPENED_EXISTING_KEY = 2 ) @@ -34,11 +34,11 @@ var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} var rootPath = `SOFTWARE\Microsoft\runhcs` type NotFoundError struct { - Id string + ID string } func (err *NotFoundError) Error() string { - return fmt.Sprintf("ID '%s' was not found", err.Id) + return fmt.Sprintf("ID '%s' was not found", err.ID) } func IsNotFoundError(err error) bool { @@ -61,7 +61,8 @@ func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExi d uint32 ) fullpath := filepath.Join(k.Name, path) - err = regCreateKeyEx(syscall.Handle(k.Key), syscall.StringToUTF16Ptr(path), 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) + pathPtr, _ := windows.UTF16PtrFromString(path) + err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) if err != nil { return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go index 3015c3640..a161c204e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go @@ -10,7 +10,7 @@ import ( "syscall" "time" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" ) // ContainerState represents the platform agnostic pieces relating to a @@ -51,7 +51,7 @@ func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { extra := "" if p != nil { - 
p.Kill() + _ = p.Kill() state, err := p.Wait() if err != nil { panic(err) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index f31edfaf8..66b8d7e03 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -11,72 +11,11 @@ import ( "unsafe" "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/winapi" winio "github.com/Microsoft/go-winio" ) -//go:generate go run $GOROOT\src\syscall\mksyscall_windows.go -output zsyscall_windows.go safeopen.go - -//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile -//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile -//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc -//sys localFree(ptr uintptr) = kernel32.LocalFree - -type ioStatusBlock struct { - Status, Information uintptr -} - -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName uintptr - Attributes uintptr - SecurityDescriptor uintptr - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -type fileLinkInformation struct { - ReplaceIfExists bool - RootDirectory uintptr - FileNameLength uint32 - FileName [1]uint16 -} - -type fileDispositionInformationEx struct { - Flags uintptr -} - -const ( - _FileLinkInformation = 11 - _FileDispositionInformationEx = 64 - - FILE_READ_ATTRIBUTES = 0x0080 - FILE_WRITE_ATTRIBUTES = 0x0100 - DELETE = 0x10000 - - FILE_OPEN = 1 - FILE_CREATE = 2 - - FILE_DIRECTORY_FILE = 0x00000001 - FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 - FILE_DELETE_ON_CLOSE = 0x00001000 - FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 - FILE_OPEN_REPARSE_POINT = 0x00200000 - - FILE_DISPOSITION_DELETE = 0x00000001 - - _OBJ_DONT_REPARSE = 0x1000 - - _STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B -) - func OpenRoot(path string) (*os.File, error) { longpath, err := longpath.LongAbs(path) if err != nil { @@ -85,16 +24,24 @@ func OpenRoot(path string) (*os.File, error) { return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) } -func ntRelativePath(path string) ([]uint16, error) { +func cleanGoStringRelativePath(path string) (string, error) { path = filepath.Clean(path) if strings.Contains(path, ":") { // Since alternate data streams must follow the file they // are attached to, finding one here (out of order) is invalid. 
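To make the intent of the ':' check concrete, a hypothetical test-style snippet, written as if inside the safefile package and assuming "fmt" is imported in the file; the paths are made up:

func exampleCleanRelativePath() {
	// A plain relative path is cleaned and returned unchanged.
	p, err := cleanGoStringRelativePath(`dir\file.txt`)
	fmt.Println(p, err) // dir\file.txt <nil>

	// NTFS alternate-data-stream syntax is rejected, as is an absolute path.
	_, err = cleanGoStringRelativePath(`dir\file.txt:stream`)
	fmt.Println(err) // path contains invalid character `:`

	_, err = cleanGoStringRelativePath(`\abs\path`)
	fmt.Println(err) // expected relative path
}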
- return nil, errors.New("path contains invalid character `:`") + return "", errors.New("path contains invalid character `:`") } fspath := filepath.FromSlash(path) if len(fspath) > 0 && fspath[0] == '\\' { - return nil, errors.New("expected relative path") + return "", errors.New("expected relative path") + } + return fspath, nil +} + +func ntRelativePath(path string) ([]uint16, error) { + fspath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err } path16 := utf16.Encode(([]rune)(fspath)) @@ -110,11 +57,11 @@ func ntRelativePath(path string) ([]uint16, error) { func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { var ( h uintptr - iosb ioStatusBlock - oa objectAttributes + iosb winapi.IOStatusBlock + oa winapi.ObjectAttributes ) - path16, err := ntRelativePath(path) + cleanRelativePath, err := cleanGoStringRelativePath(path) if err != nil { return nil, err } @@ -123,20 +70,16 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl return nil, errors.New("missing root directory") } - upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2) - defer localFree(upathBuffer) - - upath := (*unicodeString)(unsafe.Pointer(upathBuffer)) - upath.Length = uint16(len(path16) * 2) - upath.MaximumLength = upath.Length - upath.Buffer = upathBuffer + unsafe.Sizeof(*upath) - copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16) + pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) + if err != nil { + return nil, err + } oa.Length = unsafe.Sizeof(oa) - oa.ObjectName = upathBuffer + oa.ObjectName = pathUnicode oa.RootDirectory = uintptr(root.Fd()) - oa.Attributes = _OBJ_DONT_REPARSE - status := ntCreateFile( + oa.Attributes = winapi.OBJ_DONT_REPARSE + status := winapi.NtCreateFile( &h, accessMask|syscall.SYNCHRONIZE, &oa, @@ -145,12 +88,12 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl 0, shareFlags, createDisposition, - FILE_OPEN_FOR_BACKUP_INTENT|FILE_SYNCHRONOUS_IO_NONALERT|flags, + winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, nil, 0, ) if status != 0 { - return nil, rtlNtStatusToDosError(status) + return nil, winapi.RtlNtStatusToDosError(status) } fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) @@ -182,7 +125,7 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. oldroot, syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, + winapi.FILE_OPEN, 0, ) if err != nil { @@ -199,8 +142,8 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. newroot, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, - FILE_DIRECTORY_FILE) + winapi.FILE_OPEN, + winapi.FILE_DIRECTORY_FILE) if err != nil { return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} } @@ -211,7 +154,7 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. 
return err } if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)} + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} } } else { @@ -227,24 +170,25 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. return err } - size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2 - linkinfoBuffer := localAlloc(0, size) - defer localFree(linkinfoBuffer) - linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := winapi.LocalAlloc(0, size) + defer winapi.LocalFree(linkinfoBuffer) + + linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) linkinfo.RootDirectory = parent.Fd() linkinfo.FileNameLength = uint32(len(newbase16) * 2) - copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16) + copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) - var iosb ioStatusBlock - status := ntSetInformationFile( + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( oldf.Fd(), &iosb, linkinfoBuffer, uint32(size), - _FileLinkInformation, + winapi.FileLinkInformationClass, ) if status != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)} + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} } return nil @@ -252,17 +196,17 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. // deleteOnClose marks a file to be deleted when the handle is closed. func deleteOnClose(f *os.File) error { - disposition := fileDispositionInformationEx{Flags: FILE_DISPOSITION_DELETE} - var iosb ioStatusBlock - status := ntSetInformationFile( + disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( f.Fd(), &iosb, uintptr(unsafe.Pointer(&disposition)), uint32(unsafe.Sizeof(disposition)), - _FileDispositionInformationEx, + winapi.FileDispositionInformationExClass, ) if status != 0 { - return rtlNtStatusToDosError(status) + return winapi.RtlNtStatusToDosError(status) } return nil } @@ -291,16 +235,16 @@ func RemoveRelative(path string, root *os.File) error { f, err := openRelativeInternal( path, root, - FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|DELETE, + winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, - FILE_OPEN_REPARSE_POINT) + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) if err == nil { defer f.Close() err = deleteOnClose(f) if err == syscall.ERROR_ACCESS_DENIED { // Maybe the file is marked readonly. Clear the bit and retry. 
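Stepping back, the pattern this file implements is: open the root directory once with OpenRoot, then perform every subsequent operation relative to that handle so a reparse point under the root cannot redirect it (note OBJ_DONT_REPARSE above). A minimal usage sketch of LstatRelative and RemoveRelative, written as if inside the safefile package; the layer path and file name are hypothetical:

// Sketch only: the paths below are illustrative.
func removeScratchFileExample() error {
	root, err := OpenRoot(`C:\layers\scratch`)
	if err != nil {
		return err
	}
	defer root.Close()

	// Stat and delete a file relative to the opened root rather than by absolute path.
	if _, err := LstatRelative(`sandbox.vhdx`, root); err != nil {
		return err
	}
	return RemoveRelative(`sandbox.vhdx`, root)
}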
- clearReadOnly(f) + _ = clearReadOnly(f) err = deleteOnClose(f) } } @@ -385,8 +329,8 @@ func MkdirRelative(path string, root *os.File) error { root, 0, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_CREATE, - FILE_DIRECTORY_FILE) + winapi.FILE_CREATE, + winapi.FILE_DIRECTORY_FILE) if err == nil { f.Close() } else { @@ -401,10 +345,10 @@ func LstatRelative(path string, root *os.File) (os.FileInfo, error) { f, err := openRelativeInternal( path, root, - FILE_READ_ATTRIBUTES, + winapi.FILE_READ_ATTRIBUTES, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, - FILE_OPEN_REPARSE_POINT) + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) if err != nil { return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} } @@ -421,7 +365,7 @@ func EnsureNotReparsePointRelative(path string, root *os.File) error { root, 0, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, + winapi.FILE_OPEN, 0) if err != nil { return err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go deleted file mode 100644 index 709b9d347..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package safefile - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") -) - -func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) - status = uint32(r0) - return -} - -func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) - status = uint32(r0) - return -} - -func rtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func localAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) - ptr = uintptr(r0) - return -} - -func localFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go deleted file mode 100644 index 27d0b8c48..000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Memory2 struct { - SizeInMB int32 `json:"SizeInMB,omitempty"` - - AllowOvercommit bool `json:"AllowOvercommit,omitempty"` - - EnableHotHint bool `json:"EnableHotHint,omitempty"` - - EnableColdHint bool `json:"EnableColdHint,omitempty"` - - EnableEpf bool `json:"EnableEpf,omitempty"` - - // EnableDeferredCommit is private in the schema. If regenerated need to add back. 
- EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go index ff3b6572e..eaf39fa51 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -29,6 +29,9 @@ var ( // SystemResume is the timeout for resuming a compute system SystemResume time.Duration = defaultTimeout + // SystemSave is the timeout for saving a compute system + SystemSave time.Duration = defaultTimeout + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. SyscallWatcher time.Duration = defaultTimeout @@ -51,6 +54,7 @@ func init() { SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go new file mode 100644 index 000000000..e7f114b67 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -0,0 +1,610 @@ +package vmcompute + +import ( + gcontext "context" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "go.opencensus.io/trace" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? +//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? +//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? +//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? 
+//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? +//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? +//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? + +//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? +//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? +//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? + +// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously +const errVmcomputeOperationPending = syscall.Errno(0xC0370103) + +// HcsSystem is the handle associated with a created compute system. +type HcsSystem syscall.Handle + +// HcsProcess is the handle associated with a created process in a compute +// system. +type HcsProcess syscall.Handle + +// HcsCallback is the handle associated with the function to call when events +// occur. +type HcsCallback syscall.Handle + +// HcsProcessInformation is the structure used when creating or getting process +// info. +type HcsProcessInformation struct { + // ProcessId is the pid of the created process. + ProcessId uint32 + reserved uint32 //nolint:structcheck + // StdInput is the handle associated with the stdin of the process. + StdInput syscall.Handle + // StdOutput is the handle associated with the stdout of the process. + StdOutput syscall.Handle + // StdError is the handle associated with the stderr of the process. 
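The std handles in HcsProcessInformation are raw Win32 handles handed back to the caller; a hypothetical sketch of how a consumer might wrap them for Go I/O, written as if inside the vmcompute package and assuming "io" and "os" are imported. os.NewFile and the stream names are illustrative, not part of this patch:

// Sketch only: assumes pi came from a successful HcsCreateProcess call.
func wrapProcessPipes(pi HcsProcessInformation) (stdin io.WriteCloser, stdout io.ReadCloser, stderr io.ReadCloser) {
	// Wrapping a handle in an *os.File transfers ownership: closing the
	// file closes the underlying handle.
	stdin = os.NewFile(uintptr(pi.StdInput), "stdin")
	stdout = os.NewFile(uintptr(pi.StdOutput), "stdout")
	stderr = os.NewFile(uintptr(pi.StdError), "stderr")
	return stdin, stdout, stderr
}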
+ StdError syscall.Handle +} + +func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { + if timeout > 0 { + var cancel gcontext.CancelFunc + ctx, cancel = gcontext.WithTimeout(ctx, timeout) + defer cancel() + } + + done := make(chan error, 1) + go func() { + done <- f() + }() + select { + case <-ctx.Done(): + if ctx.Err() == gcontext.DeadlineExceeded { + log.G(ctx).WithField(logfields.Timeout, timeout). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + } + return ctx.Err() + case err := <-done: + return err + } +} + +func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("query", query)) + + return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + computeSystemsp *uint16 + resultp *uint16 + ) + err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + if computeSystemsp != nil { + computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes( + trace.StringAttribute("id", id), + trace.StringAttribute("configuration", configuration)) + + return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { + var resultp *uint16 + err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenComputeSystem(id, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseComputeSystem(computeSystem) + }) +} + +func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") + defer 
span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemStart, func() error { + var resultp *uint16 + err := hcsStartComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsShutdownComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemPause, func() error { + var resultp *uint16 + err := hcsPauseComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemResume, func() error { + var resultp *uint16 + err := hcsResumeComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { + ctx, span := 
trace.StartSpan(ctx, "HcsGetComputeSystemProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("configuration", configuration)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyServiceSettings(settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterComputeSystemCallback(callbackHandle) + }) +} + +func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) + + return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := 
hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) + + return process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenProcess(computeSystem, pid, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseProcess(process) + }) +} + +func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateProcess(process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSignalProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSignalProcess(process, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsGetProcessInfo(process, &processInformation, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + processPropertiesp *uint16 + resultp *uint16 + ) + err := hcsGetProcessProperties(process, 
&processPropertiesp, &resultp) + if processPropertiesp != nil { + processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyProcess(process, settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterProcessCallback(callbackHandle) + }) +} + +func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSaveComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go similarity index 74% rename from vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go rename to vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go index fcd5cdc87..cae55058d 
100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -1,6 +1,6 @@ // Code generated mksyscall_windows.exe DO NOT EDIT -package hcs +package vmcompute import ( "syscall" @@ -50,19 +50,21 @@ var ( procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") - - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") - procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") + procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") ) func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { @@ -88,7 +90,7 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result return } -func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { +func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(id) if hr != nil { @@ -102,7 +104,7 @@ func hcsCreateComputeSystem(id string, configuration string, identity syscall.Ha return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) } -func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { if hr = procHcsCreateComputeSystem.Find(); hr != nil { return } @@ -116,7 +118,7 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall return } -func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) { +func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) 
(hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(id) if hr != nil { @@ -125,7 +127,7 @@ func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) return _hcsOpenComputeSystem(_p0, computeSystem, result) } -func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16) (hr error) { +func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { if hr = procHcsOpenComputeSystem.Find(); hr != nil { return } @@ -139,7 +141,7 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16 return } -func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) { +func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { if hr = procHcsCloseComputeSystem.Find(); hr != nil { return } @@ -153,7 +155,7 @@ func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) { return } -func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { +func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { @@ -162,7 +164,7 @@ func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uin return _hcsStartComputeSystem(computeSystem, _p0, result) } -func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { +func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { if hr = procHcsStartComputeSystem.Find(); hr != nil { return } @@ -176,7 +178,7 @@ func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **u return } -func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { +func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { @@ -185,7 +187,7 @@ func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result ** return _hcsShutdownComputeSystem(computeSystem, _p0, result) } -func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { +func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { if hr = procHcsShutdownComputeSystem.Find(); hr != nil { return } @@ -199,7 +201,7 @@ func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result return } -func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { +func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { @@ -208,7 +210,7 @@ func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result * return _hcsTerminateComputeSystem(computeSystem, _p0, result) } -func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { +func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { if hr = procHcsTerminateComputeSystem.Find(); hr != nil { return } @@ -222,7 +224,7 @@ func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result return } -func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { +func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result 
**uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { @@ -231,7 +233,7 @@ func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uin return _hcsPauseComputeSystem(computeSystem, _p0, result) } -func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { +func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { if hr = procHcsPauseComputeSystem.Find(); hr != nil { return } @@ -245,7 +247,7 @@ func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **u return } -func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { +func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { @@ -254,7 +256,7 @@ func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **ui return _hcsResumeComputeSystem(computeSystem, _p0, result) } -func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { +func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { if hr = procHcsResumeComputeSystem.Find(); hr != nil { return } @@ -268,7 +270,7 @@ func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result ** return } -func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { +func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(propertyQuery) if hr != nil { @@ -277,7 +279,7 @@ func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) } -func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { +func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { return } @@ -291,7 +293,7 @@ func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint return } -func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) { +func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(configuration) if hr != nil { @@ -300,7 +302,7 @@ func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, resul return _hcsModifyComputeSystem(computeSystem, _p0, result) } -func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, result **uint16) (hr error) { +func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { if hr = procHcsModifyComputeSystem.Find(); hr != nil { return } @@ -314,7 +316,30 @@ func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, res return } -func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = 
syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyServiceSettings.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { return } @@ -328,7 +353,7 @@ func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, return } -func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) { +func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { return } @@ -342,7 +367,30 @@ func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) { return } -func hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsSaveComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(processParameters) if hr != nil { @@ -351,7 +399,7 @@ func hcsCreateProcess(computeSystem hcsSystem, processParameters string, process return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) } -func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { +func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { if hr = procHcsCreateProcess.Find(); hr != nil { return } @@ -365,7 +413,7 @@ func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, proce return } -func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) { +func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { if hr = procHcsOpenProcess.Find(); hr != nil { return } @@ -379,7 +427,7 @@ func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, re return } -func hcsCloseProcess(process hcsProcess) (hr error) { +func hcsCloseProcess(process HcsProcess) (hr error) { if hr = procHcsCloseProcess.Find(); 
hr != nil { return } @@ -393,7 +441,7 @@ func hcsCloseProcess(process hcsProcess) (hr error) { return } -func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) { +func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { if hr = procHcsTerminateProcess.Find(); hr != nil { return } @@ -407,7 +455,7 @@ func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) { return } -func hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) { +func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { @@ -416,11 +464,11 @@ func hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr e return _hcsSignalProcess(process, _p0, result) } -func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateProcess.Find(); hr != nil { +func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { + if hr = procHcsSignalProcess.Find(); hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -430,7 +478,7 @@ func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr return } -func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) { +func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { if hr = procHcsGetProcessInfo.Find(); hr != nil { return } @@ -444,7 +492,7 @@ func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInforma return } -func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) { +func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { if hr = procHcsGetProcessProperties.Find(); hr != nil { return } @@ -458,7 +506,7 @@ func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, res return } -func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) { +func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { @@ -467,7 +515,7 @@ func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr return _hcsModifyProcess(process, _p0, result) } -func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (hr error) { +func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { if hr = procHcsModifyProcess.Find(); hr != nil { return } @@ -504,7 +552,7 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result return } -func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { +func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { if hr = procHcsRegisterProcessCallback.Find(); hr != nil { return } @@ -518,7 +566,7 @@ func hcsRegisterProcessCallback(process hcsProcess, 
callback uintptr, context ui return } -func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) { +func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { return } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go index dcb919268..5debe974d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -1,32 +1,27 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // ActivateLayer will find the layer with the given id and mount it's filesystem. // For a read/write layer, the mounted filesystem will appear as a volume on the // host, while a read-only layer is generally expected to be a no-op. // An activated layer must later be deactivated via DeactivateLayer. -func ActivateLayer(path string) (err error) { +func ActivateLayer(ctx context.Context, path string) (err error) { title := "hcsshim::ActivateLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = activateLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go index 5784241df..3ec708d1e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go @@ -1,6 +1,7 @@ package wclayer import ( + "context" "errors" "os" "path/filepath" @@ -8,10 +9,16 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" + "go.opencensus.io/trace" ) type baseLayerWriter struct { + ctx context.Context + s *trace.Span + root *os.File f *os.File bw *winio.BackupFileWriter @@ -31,7 +38,7 @@ type dirInfo struct { func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { for i := range dis { di := &dis[len(dis)-i-1] // reverse order: process child directories first - f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_OPEN, safefile.FILE_DIRECTORY_FILE) + f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) if err != nil { return err } @@ -41,6 +48,7 @@ func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { if err != nil { return err } + } return nil } @@ -86,14 +94,12 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e extraFlags := 
uint32(0) if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - extraFlags |= safefile.FILE_DIRECTORY_FILE - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) - } + extraFlags |= winapi.FILE_DIRECTORY_FILE + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) } mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, extraFlags) + f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) if err != nil { return hcserror.New(err, "Failed to safefile.OpenRelative", name) } @@ -136,12 +142,15 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) { return n, err } -func (w *baseLayerWriter) Close() error { +func (w *baseLayerWriter) Close() (err error) { + defer w.s.End() + defer func() { oc.SetSpanStatus(w.s, err) }() defer func() { w.root.Close() w.root = nil }() - err := w.closeCurrentFile() + + err = w.closeCurrentFile() if err != nil { return err } @@ -153,7 +162,7 @@ func (w *baseLayerWriter) Close() error { return err } - err = ProcessBaseLayer(w.root.Name()) + err = ProcessBaseLayer(w.ctx, w.root.Name()) if err != nil { return err } @@ -163,7 +172,7 @@ func (w *baseLayerWriter) Close() error { if err != nil { return err } - err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM")) + err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) if err != nil { return err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go index be2bc3fd6..480aee872 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -1,31 +1,27 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // CreateLayer creates a new, empty, read-only layer on the filesystem based on // the parent layer provided. 
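CreateLayer below follows the same conversion applied throughout this patch: the logrus field logging is replaced by an OpenCensus span whose status is set from the named error return. A minimal, self-contained sketch of that shape (tracedOperation and doWork are illustrative names, not part of the patch; oc.SetSpanStatus is the hcsshim-internal helper the diff itself uses):

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

// tracedOperation mirrors the instrumentation shape used by every converted
// wclayer entry point: start a span named after the API, end it on return,
// record the returned error as the span status, and attach the interesting
// arguments as attributes.
func tracedOperation(ctx context.Context, path string) (err error) {
	ctx, span := trace.StartSpan(ctx, "hcsshim::TracedOperation")
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()
	span.AddAttributes(trace.StringAttribute("path", path))

	return doWork(ctx, path)
}

// doWork is a placeholder for the real vmcompute call.
func doWork(ctx context.Context, path string) error { return nil }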
-func CreateLayer(path, parent string) (err error) { +func CreateLayer(ctx context.Context, path, parent string) (err error) { title := "hcsshim::CreateLayer" - fields := logrus.Fields{ - "parent": parent, - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parent", parent)) err = createLayer(&stdDriverInfo, path, parent) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go index 7e3351289..131aa94f1 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -1,38 +1,34 @@ package wclayer import ( + "context" + "strings" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // CreateScratchLayer creates and populates new read-write layer for use by a container. -// This requires both the id of the direct parent layer, as well as the full list -// of paths to all parent layers up to the base (and including the direct parent -// whose id was provided). -func CreateScratchLayer(path string, parentLayerPaths []string) (err error) { +// This requires the full list of paths to all parent layers up to the base +func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { title := "hcsshim::CreateScratchLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } err = createSandboxLayer(&stdDriverInfo, path, 0, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go index 2dd5d5715..d5bf2f5bd 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -1,25 +1,20 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // DeactivateLayer will dismount a layer that was mounted via ActivateLayer. 
-func DeactivateLayer(path string) (err error) { +func DeactivateLayer(ctx context.Context, path string) (err error) { title := "hcsshim::DeactivateLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = deactivateLayer(&stdDriverInfo, path) if err != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go index 4da690c20..424467ac3 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -1,30 +1,25 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // DestroyLayer will remove the on-disk files representing the layer with the given // path, including that layer's containing folder, if any. -func DestroyLayer(path string) (err error) { +func DestroyLayer(ctx context.Context, path string) (err error) { title := "hcsshim::DestroyLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = destroyLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index 651676fb2..035c9041e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -1,30 +1,140 @@ package wclayer import ( + "context" + "os" + "path/filepath" + "syscall" + "unsafe" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "go.opencensus.io/trace" ) // ExpandScratchSize expands the size of a layer to at least size bytes. 
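The ExpandScratchSize hunk below gains a manual volume-extension workaround for 19H1-era builds: after growing the sandbox VHD it compares the partition length against the NTFS volume size in clusters and issues FSCTL_EXTEND_VOLUME only when there is room to grow. A small sketch of just that arithmetic, using the 4096-byte cluster and 512-byte sector sizes hard-coded in the diff (extendTargetSectors is an illustrative helper, not part of the patch):

package example

// extendTargetSectors mirrors the calculation in expandSandboxVolume below:
// convert the partition length to NTFS clusters, and extend only when the
// current volume plus the one cluster NTFS reserves is still smaller than the
// partition. The returned sector count is what FSCTL_EXTEND_VOLUME expects.
func extendTargetSectors(partitionSize, volumeSize int64) (sectors int64, grow bool) {
	const (
		clusterSize = 4096
		sectorSize  = 512
	)
	targetClusters := partitionSize / clusterSize
	volumeClusters := volumeSize / clusterSize
	if volumeClusters+1 >= targetClusters {
		return 0, false // nothing to grow; extending would fail with invalid parameter
	}
	return targetClusters * (clusterSize / sectorSize), true
}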
-func ExpandScratchSize(path string, size uint64) (err error) { +func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { title := "hcsshim::ExpandScratchSize" - fields := logrus.Fields{ - "path": path, - "size": size, + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.Int64Attribute("size", int64(size))) + + err = expandSandboxSize(&stdDriverInfo, path, size) + if err != nil { + return hcserror.New(err, title, "") } - logrus.WithFields(fields).Debug(title) - defer func() { + + // Manually expand the volume now in order to work around bugs in 19H1 and + // prerelease versions of Vb. Remove once this is fixed in Windows. + if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { + err = expandSandboxVolume(ctx, path) if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") + return err } - }() + } + return nil +} - err = expandSandboxSize(&stdDriverInfo, path, size) +type virtualStorageType struct { + DeviceID uint32 + VendorID [16]byte +} + +type openVersion2 struct { + GetInfoOnly int32 // bool but 4-byte aligned + ReadOnly int32 // bool but 4-byte aligned + ResiliencyGUID [16]byte // GUID +} + +type openVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 openVersion2 +} + +func attachVhd(path string) (syscall.Handle, error) { + var ( + defaultType virtualStorageType + handle syscall.Handle + ) + parameters := openVirtualDiskParameters{Version: 2} + err := openVirtualDisk( + &defaultType, + path, + 0, + 0, + ¶meters, + &handle) if err != nil { - return hcserror.New(err, title+" - failed", "") + return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} + } + err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) + if err != nil { + syscall.Close(handle) + return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} + } + return handle, nil +} + +func expandSandboxVolume(ctx context.Context, path string) error { + // Mount the sandbox VHD temporarily. + vhdPath := filepath.Join(path, "sandbox.vhdx") + vhd, err := attachVhd(vhdPath) + if err != nil { + return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} + } + defer syscall.Close(vhd) + + // Open the volume. + volumePath, err := GetLayerMountPath(ctx, path) + if err != nil { + return err + } + if volumePath[len(volumePath)-1] == '\\' { + volumePath = volumePath[:len(volumePath)-1] + } + volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) + if err != nil { + return err + } + defer volume.Close() + + // Get the volume's underlying partition size in NTFS clusters. + var ( + partitionSize int64 + bytes uint32 + ) + const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) + if err != nil { + return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} + } + const ( + clusterSize = 4096 + sectorSize = 512 + ) + targetClusters := partitionSize / clusterSize + + // Get the volume's current size in NTFS clusters. 
+ var volumeSize int64 + err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) + if err != nil { + return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} + } + volumeClusters := volumeSize / clusterSize + + // Only resize the volume if there is space to grow, otherwise this will + // fail with invalid parameter. NTFS reserves one cluster. + if volumeClusters+1 < targetClusters { + targetSectors := targetClusters * (clusterSize / sectorSize) + const _FSCTL_EXTEND_VOLUME = 0x000900F0 + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) + if err != nil { + return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} + } } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go index 0425b3395..97b27eb7d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -1,12 +1,15 @@ package wclayer import ( + "context" "io/ioutil" "os" + "strings" "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // ExportLayer will create a folder at exportFolderPath and fill that folder with @@ -14,31 +17,25 @@ import ( // format includes any metadata required for later importing the layer (using // ImportLayer), and requires the full list of parent layer paths in order to // perform the export. -func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) { +func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { title := "hcsshim::ExportLayer" - fields := logrus.Fields{ - "path": path, - "exportFolderPath": exportFolderPath, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("exportFolderPath", exportFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } @@ -52,25 +49,46 @@ type LayerReader interface { // NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. // The caller must have taken the SeBackupPrivilege privilege // to call this and any methods on the resulting LayerReader. 
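NewLayerReader below adopts a span-ownership pattern that several wrappers in this patch share: the constructor ends its span immediately only on failure; on success the span is handed to the returned wrapper and closed in that wrapper's Close, so it covers the whole lifetime of the reader. A hedged sketch of the pattern with illustrative names (resource, newResource):

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

// resource keeps the span opened by its constructor and ends it when the
// caller is finished, mirroring legacyLayerReaderWrapper below.
type resource struct {
	s *trace.Span
}

func (r *resource) Close() (err error) {
	defer r.s.End()
	defer func() { oc.SetSpanStatus(r.s, err) }()
	return nil // real cleanup would go here
}

// newResource ends the span early only if construction fails; otherwise the
// span stays open until resource.Close runs.
func newResource(ctx context.Context) (_ *resource, err error) {
	_, span := trace.StartSpan(ctx, "example::NewResource")
	defer func() {
		if err != nil {
			oc.SetSpanStatus(span, err)
			span.End()
		}
	}()
	return &resource{s: span}, nil
}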
-func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) { +func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + exportPath, err := ioutil.TempDir("", "hcs") if err != nil { return nil, err } - err = ExportLayer(path, exportPath, parentLayerPaths) + err = ExportLayer(ctx, path, exportPath, parentLayerPaths) if err != nil { os.RemoveAll(exportPath) return nil, err } - return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil + return &legacyLayerReaderWrapper{ + ctx: ctx, + s: span, + legacyLayerReader: newLegacyLayerReader(exportPath), + }, nil } type legacyLayerReaderWrapper struct { + ctx context.Context + s *trace.Span + *legacyLayerReader } -func (r *legacyLayerReaderWrapper) Close() error { - err := r.legacyLayerReader.Close() +func (r *legacyLayerReaderWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + + err = r.legacyLayerReader.Close() os.RemoveAll(r.root) return err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go index d60b6ed53..8d213f587 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -1,39 +1,33 @@ package wclayer import ( + "context" "syscall" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // GetLayerMountPath will look for a mounted layer with the given path and return // the path at which that layer can be accessed. This path may be a volume path // if the layer is a mounted read-write layer, otherwise it is expected to be the // folder path at which the layer is stored. -func GetLayerMountPath(path string) (_ string, err error) { +func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { title := "hcsshim::GetLayerMountPath" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() - - var mountPathLength uintptr - mountPathLength = 0 + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + var mountPathLength uintptr = 0 // Call the procedure itself. - logrus.WithFields(fields).Debug("Calling proc (1)") + log.G(ctx).Debug("Calling proc (1)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) if err != nil { - return "", hcserror.New(err, title+" - failed", "(first call)") + return "", hcserror.New(err, title, "(first call)") } // Allocate a mount path of the returned length. 
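GetLayerMountPath (continued in the hunk below) uses the standard two-call Win32 length-query protocol: the first call passes a nil buffer and receives the required length, the second fills a buffer of that size. A generic sketch of the protocol; queryFn is an illustrative stand-in whose shape loosely follows the generated getLayerMountPath wrapper:

package example

import "syscall"

// twoCallQuery sketches the length-then-data pattern: call once with a nil
// buffer to learn the length in UTF-16 code units, allocate, then call again
// to receive the data.
func twoCallQuery(queryFn func(length *uintptr, buffer *uint16) error) (string, error) {
	var length uintptr
	if err := queryFn(&length, nil); err != nil { // first call: size only
		return "", err
	}
	if length == 0 {
		return "", nil
	}
	buf := make([]uint16, length)
	if err := queryFn(&length, &buf[0]); err != nil { // second call: fill buffer
		return "", err
	}
	return syscall.UTF16ToString(buf), nil
}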
@@ -44,13 +38,13 @@ func GetLayerMountPath(path string) (_ string, err error) { mountPathp[0] = 0 // Call the procedure again - logrus.WithFields(fields).Debug("Calling proc (2)") + log.G(ctx).Debug("Calling proc (2)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) if err != nil { - return "", hcserror.New(err, title+" - failed", "(second call)") + return "", hcserror.New(err, title, "(second call)") } mountPath := syscall.UTF16ToString(mountPathp[0:]) - fields["mountPath"] = mountPath + span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) return mountPath, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go index dbd83ef2b..ae1fff840 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -1,29 +1,29 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // GetSharedBaseImages will enumerate the images stored in the common central // image store and return descriptive info about those images for the purpose // of registering them with the graphdriver, graph, and tagstore. -func GetSharedBaseImages() (imageData string, err error) { +func GetSharedBaseImages(ctx context.Context) (_ string, err error) { title := "hcsshim::GetSharedBaseImages" - logrus.Debug(title) - defer func() { - if err != nil { - logrus.WithError(err).Error(err) - } else { - logrus.WithField("imageData", imageData).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() var buffer *uint16 err = getBaseImages(&buffer) if err != nil { - return "", hcserror.New(err, title+" - failed", "") + return "", hcserror.New(err, title, "") } - return interop.ConvertAndFreeCoTaskMemString(buffer), nil + imageData := interop.ConvertAndFreeCoTaskMemString(buffer) + span.AddAttributes(trace.StringAttribute("imageData", imageData)) + return imageData, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go index 05735df6c..4b282fef9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -1,30 +1,26 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // GrantVmAccess adds access to a file for a given VM -func GrantVmAccess(vmid string, filepath string) (err error) { +func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { title := "hcsshim::GrantVmAccess" - fields := logrus.Fields{ - "vm-id": vmid, - "path": filepath, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { 
oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("vm-id", vmid), + trace.StringAttribute("path", filepath)) err = grantVmAccess(vmid, filepath) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go index 76a804f2a..687550f0b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -1,45 +1,42 @@ package wclayer import ( + "context" "io/ioutil" "os" "path/filepath" + "strings" "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/sirupsen/logrus" + "go.opencensus.io/trace" ) // ImportLayer will take the contents of the folder at importFolderPath and import // that into a layer with the id layerId. Note that in order to correctly populate // the layer and interperet the transport format, all parent layers must already // be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) { +func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { title := "hcsshim::ImportLayer" - fields := logrus.Fields{ - "path": path, - "importFolderPath": importFolderPath, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("importFolderPath", importFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } err = importLayer(&stdDriverInfo, path, importFolderPath, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } @@ -60,20 +57,26 @@ type LayerWriter interface { } type legacyLayerWriterWrapper struct { + ctx context.Context + s *trace.Span + *legacyLayerWriter path string parentLayerPaths []string } -func (r *legacyLayerWriterWrapper) Close() error { +func (r *legacyLayerWriterWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() defer os.RemoveAll(r.root.Name()) defer r.legacyLayerWriter.CloseRoots() - err := r.legacyLayerWriter.Close() + + err = r.legacyLayerWriter.Close() if err != nil { return err } - if err = ImportLayer(r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { + if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { return err } for _, name := range r.Tombstones { @@ -90,13 +93,26 @@ func (r *legacyLayerWriterWrapper) Close() error { return err } } + + // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone + // deletion and hard link creation. 
This is because Tombstone deletion and hard link + // creation updates the directory last write timestamps so that will change the + // timestamps added by the `Add` call. Some container applications depend on the + // correctness of these timestamps and so we should change the timestamps back to + // the original value (i.e the value provided in the Add call) after this + // processing is done. + err = reapplyDirectoryTimes(r.destRoot, r.changedDi) + if err != nil { + return err + } + // Prepare the utility VM for use if one is present in the layer. if r.HasUtilityVM { err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) if err != nil { return err } - err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM")) + err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) if err != nil { return err } @@ -107,7 +123,18 @@ func (r *legacyLayerWriterWrapper) Close() error { // NewLayerWriter returns a new layer writer for creating a layer on disk. // The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges // to call this and any methods on the resulting LayerWriter. -func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) { +func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + if len(parentLayerPaths) == 0 { // This is a base layer. It gets imported differently. f, err := safefile.OpenRoot(path) @@ -115,6 +142,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) return nil, err } return &baseLayerWriter{ + ctx: ctx, + s: span, root: f, }, nil } @@ -128,6 +157,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) return nil, err } return &legacyLayerWriterWrapper{ + ctx: ctx, + s: span, legacyLayerWriter: w, path: importPath, parentLayerPaths: parentLayerPaths, diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go index 258167a57..01e672339 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -1,33 +1,28 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // LayerExists will return true if a layer with the given id exists and is known // to the system. -func LayerExists(path string) (_ bool, err error) { +func LayerExists(ctx context.Context, path string) (_ bool, err error) { title := "hcsshim::LayerExists" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) // Call the procedure itself. 
var exists uint32 err = layerExists(&stdDriverInfo, path, &exists) if err != nil { - return false, hcserror.New(err, title+" - failed", "") + return false, hcserror.New(err, title, "") } - fields["layer-exists"] = exists != 0 + span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) return exists != 0, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go index 90df3bedc..0ce34a30f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -1,13 +1,22 @@ package wclayer import ( + "context" "path/filepath" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // LayerID returns the layer ID of a layer on disk. -func LayerID(path string) (guid.GUID, error) { +func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { + title := "hcsshim::LayerID" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + _, file := filepath.Split(path) - return NameToGuid(file) + return NameToGuid(ctx, file) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go index 6d0ae8a07..1ec893c6a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -4,9 +4,10 @@ package wclayer // functionality. import ( + "context" "syscall" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/sirupsen/logrus" ) @@ -68,12 +69,12 @@ type WC_LAYER_DESCRIPTOR struct { Pathp *uint16 } -func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { +func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { // Array of descriptors that gets constructed. 
var layers []WC_LAYER_DESCRIPTOR for i := 0; i < len(parentLayerPaths); i++ { - g, err := LayerID(parentLayerPaths[i]) + g, err := LayerID(ctx, parentLayerPaths[i]) if err != nil { logrus.WithError(err).Debug("Failed to convert name to guid") return nil, err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index b8ea5d263..b7f3064f2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -15,6 +15,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/longpath" "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" ) var errorIterationCanceled = errors.New("") @@ -75,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) { defer tf.Close() s := bufio.NewScanner(tf) if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("Invalid tombstones file") + return nil, errors.New("invalid tombstones file") } ts := make(map[string]([]string)) @@ -341,7 +342,7 @@ type legacyLayerWriter struct { backupWriter *winio.BackupFileWriter Tombstones []string HasUtilityVM bool - uvmDi []dirInfo + changedDi []dirInfo addedFiles map[string]bool PendingLinks []pendingLink pendingDirs []pendingDir @@ -389,7 +390,7 @@ func (w *legacyLayerWriter) CloseRoots() { w.destRoot = nil } for i := range w.parentRoots { - w.parentRoots[i].Close() + _ = w.parentRoots[i].Close() } w.parentRoots = nil } @@ -472,8 +473,8 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool srcRoot, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.FILE_SHARE_READ, - safefile.FILE_OPEN, - safefile.FILE_OPEN_REPARSE_POINT) + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) if err != nil { return nil, err } @@ -488,14 +489,14 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool extraFlags := uint32(0) if isDir { - extraFlags |= safefile.FILE_DIRECTORY_FILE + extraFlags |= winapi.FILE_DIRECTORY_FILE } dest, err := safefile.OpenRelative( subPath, destRoot, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, syscall.FILE_SHARE_READ, - safefile.FILE_CREATE, + winapi.FILE_CREATE, extraFlags) if err != nil { return nil, err @@ -555,7 +556,7 @@ func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles if err != nil { return err } - if isDir && !isReparsePoint { + if isDir { di = append(di, dirInfo{path: relPath, fileInfo: *fi}) } } else { @@ -583,6 +584,10 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return w.initUtilityVM() } + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) + } + name = filepath.Clean(name) if hasPathPrefix(name, utilityVMPath) { if !w.HasUtilityVM { @@ -591,7 +596,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { return errors.New("invalid UtilityVM layer") } - createDisposition := uint32(safefile.FILE_OPEN) + createDisposition := uint32(winapi.FILE_OPEN) if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { st, err := safefile.LstatRelative(name, w.destRoot) if err != nil && !os.IsNotExist(err) { @@ -612,16 +617,13 @@ func (w 
*legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return err } } - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo}) - } } else { // Overwrite any existing hard link. err := safefile.RemoveRelative(name, w.destRoot) if err != nil && !os.IsNotExist(err) { return err } - createDisposition = safefile.FILE_CREATE + createDisposition = winapi.FILE_CREATE } f, err := safefile.OpenRelative( @@ -630,7 +632,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, syscall.FILE_SHARE_READ, createDisposition, - safefile.FILE_OPEN_REPARSE_POINT, + winapi.FILE_OPEN_REPARSE_POINT, ) if err != nil { return err @@ -638,7 +640,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro defer func() { if f != nil { f.Close() - safefile.RemoveRelative(name, w.destRoot) + _ = safefile.RemoveRelative(name, w.destRoot) } }() @@ -667,14 +669,14 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro w.currentIsDir = true } - f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, 0) + f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0) if err != nil { return err } defer func() { if f != nil { f.Close() - safefile.RemoveRelative(fname, w.root) + _ = safefile.RemoveRelative(fname, w.root) } }() @@ -805,11 +807,5 @@ func (w *legacyLayerWriter) Close() error { return err } } - if w.HasUtilityVM { - err := reapplyDirectoryTimes(w.destRoot, w.uvmDi) - if err != nil { - return err - } - } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go index 45a63cf65..09950297c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -1,34 +1,29 @@ package wclayer import ( - "github.com/Microsoft/hcsshim/internal/guid" + "context" + + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // NameToGuid converts the given string into a GUID using the algorithm in the // Host Compute Service, ensuring GUIDs generated with the same string are common // across all clients. 
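NameToGuid below gains a context parameter and a span, and LayerID (earlier in this hunk sequence) hands it only the final path element, so identical layer directory names map to identical GUIDs on every client. A hedged usage sketch of how parent-layer IDs can be derived with the post-patch API; parentLayerIDs is illustrative, and wclayer is an internal package, so this is callable only from inside the hcsshim module:

package example

import (
	"context"

	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/Microsoft/hcsshim/internal/wclayer"
)

// parentLayerIDs derives a stable GUID per parent layer path by way of
// LayerID, which splits off the directory name and passes it to NameToGuid.
func parentLayerIDs(ctx context.Context, parentLayerPaths []string) ([]guid.GUID, error) {
	ids := make([]guid.GUID, 0, len(parentLayerPaths))
	for _, p := range parentLayerPaths {
		g, err := wclayer.LayerID(ctx, p)
		if err != nil {
			return nil, err
		}
		ids = append(ids, g)
	}
	return ids, nil
}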
-func NameToGuid(name string) (id guid.GUID, err error) { +func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { title := "hcsshim::NameToGuid" - fields := logrus.Fields{ - "name": name, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("objectName", name)) + var id guid.GUID err = nameToGuid(name, &id) if err != nil { - err = hcserror.New(err, title+" - failed", "") - return + return guid.GUID{}, hcserror.New(err, title, "") } - fields["guid"] = id.String() - return + span.AddAttributes(trace.StringAttribute("guid", id.String())) + return id, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go index 2b65b0186..90129faef 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -1,10 +1,13 @@ package wclayer import ( + "context" + "strings" "sync" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) var prepareLayerLock sync.Mutex @@ -14,23 +17,17 @@ var prepareLayerLock sync.Mutex // parent layers, and is necessary in order to view or interact with the layer // as an actual filesystem (reading and writing files, creating directories, etc). // Disabling the filter must be done via UnprepareLayer. 
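Since PrepareLayer below now requires a context (as does UnprepareLayer later in this patch), a caller-side pairing sketch may help; withPreparedLayer is illustrative only, and again usable only within the hcsshim module because wclayer is internal:

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/wclayer"
)

// withPreparedLayer enables the layer filter for path, runs fn, and disables
// the filter again. Cleanup is best-effort in this sketch; real callers will
// likely want to surface the UnprepareLayer error as well.
func withPreparedLayer(ctx context.Context, path string, parentLayerPaths []string, fn func() error) error {
	if err := wclayer.PrepareLayer(ctx, path, parentLayerPaths); err != nil {
		return err
	}
	defer func() {
		_ = wclayer.UnprepareLayer(ctx, path)
	}()
	return fn()
}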
-func PrepareLayer(path string, parentLayerPaths []string) (err error) { +func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { title := "hcsshim::PrepareLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } @@ -41,7 +38,7 @@ func PrepareLayer(path string, parentLayerPaths []string) (err error) { defer prepareLayerLock.Unlock() err = prepareLayer(&stdDriverInfo, path, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go index 884207c3e..30bcdff5f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -1,23 +1,41 @@ package wclayer -import "os" +import ( + "context" + "os" + + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) // ProcessBaseLayer post-processes a base layer that has had its files extracted. // The files should have been extracted to \Files. -func ProcessBaseLayer(path string) error { - err := processBaseImage(path) +func ProcessBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessBaseLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processBaseImage(path) if err != nil { - return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err} + return &os.PathError{Op: title, Path: path, Err: err} } return nil } // ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. // The files should have been extracted to \Files. 
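ProcessBaseLayer above and ProcessUtilityVMImage below both wrap vmcompute failures in an *os.PathError whose Op is the traced title. A small sketch of how a caller might recover the offending path from such an error (pathFromErr is an illustrative helper, not part of the patch):

package example

import (
	"errors"
	"os"
)

// pathFromErr extracts the layer path from the *os.PathError returned by
// ProcessBaseLayer / ProcessUtilityVMImage on failure.
func pathFromErr(err error) (string, bool) {
	var pe *os.PathError
	if errors.As(err, &pe) {
		return pe.Path, true
	}
	return "", false
}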
-func ProcessUtilityVMImage(path string) error { - err := processUtilityImage(path) +func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessUtilityVMImage" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processUtilityImage(path) if err != nil { - return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err} + return &os.PathError{Op: title, Path: path, Err: err} } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go index bccd45969..71b130c52 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -1,30 +1,25 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // UnprepareLayer disables the filesystem filter for the read-write layer with // the given id. -func UnprepareLayer(path string) (err error) { +func UnprepareLayer(ctx context.Context, path string) (err error) { title := "hcsshim::UnprepareLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = unprepareLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go index 78f2aacd8..9b1e06d50 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -1,6 +1,9 @@ +// Package wclayer provides bindings to HCS's legacy layer management API and +// provides a higher level interface around these calls for container layer +// management. package wclayer -import "github.com/Microsoft/hcsshim/internal/guid" +import "github.com/Microsoft/go-winio/pkg/guid" //go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go @@ -24,4 +27,9 @@ import "github.com/Microsoft/hcsshim/internal/guid" //sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? 
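The //sys lines added below are mksyscall_windows directives, consumed by the go:generate line earlier in this file; the wrappers they produce appear further down in zsyscall_windows.go. As a rough, hand-written illustration of that mapping, using a hypothetical directive rather than one from this patch:

package example

// A directive such as
//
//	//sys getTickCount() (count uint32) = kernel32.GetTickCount
//
// would be turned into roughly the wrapper below: a lazily loaded DLL/proc
// pair plus a thin Syscall shim. The real generator also handles string to
// UTF-16 conversion, error returns, and procs marked optional with a
// trailing "?" (as grantVmAccess is above).

import (
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modkernel32      = windows.NewLazySystemDLL("kernel32.dll")
	procGetTickCount = modkernel32.NewProc("GetTickCount")
)

func getTickCount() (count uint32) {
	r0, _, _ := syscall.Syscall(procGetTickCount.Addr(), 0, 0, 0, 0)
	count = uint32(r0)
	return
}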
+//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk + +//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW + type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go index d853ab259..67f917f07 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -38,6 +38,8 @@ func errnoErr(e syscall.Errno) error { var ( modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") procActivateLayer = modvmcompute.NewProc("ActivateLayer") procCopyLayer = modvmcompute.NewProc("CopyLayer") @@ -57,6 +59,9 @@ var ( procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") ) func activateLayer(info *driverInfo, id string) (hr error) { @@ -508,3 +513,57 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { } return } + +func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(directoryName) + if err != nil { + return + } + 
return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) +} + +func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go new file mode 100644 index 000000000..def952541 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -0,0 +1,44 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. 
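// Editorial sketch, not part of the upstream file: windows.Coord is two int16
// fields {X, Y}, so reinterpreting the 4-byte struct as a uint32 on
// little-endian Windows puts X in the low 16 bits and Y in the high 16 bits,
// e.g.
//
//	size := windows.Coord{X: 80, Y: 25}
//	packed := *((*uint32)(unsafe.Pointer(&size))) // 0x00190050
//
// which is the by-value COORD representation the pseudo console calls in this
// file expect.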
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) +} + +// HRESULT WINAPI CreatePseudoConsole( +// _In_ COORD size, +// _In_ HANDLE hInput, +// _In_ HANDLE hOutput, +// _In_ DWORD dwFlags, +// _Out_ HPCON* phPC +// ); +// +//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole + +// void WINAPI ClosePseudoConsole( +// _In_ HPCON hPC +// ); +// +//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole + +// HRESULT WINAPI ResizePseudoConsole( +// _In_ HPCON hPC , +// _In_ COORD size +// ); +// +//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go new file mode 100644 index 000000000..df28ea242 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -0,0 +1,13 @@ +package winapi + +import "github.com/Microsoft/go-winio/pkg/guid" + +//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA +//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA +//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW +//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW + +type DevPropKey struct { + Fmtid guid.GUID + Pid uint32 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go new file mode 100644 index 000000000..4e80ef68c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -0,0 +1,15 @@ +package winapi + +import "syscall" + +//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError + +const ( + STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B + ERROR_NO_MORE_ITEMS = 0x103 + ERROR_MORE_DATA syscall.Errno = 234 +) + +func NTSuccess(status uint32) bool { + return status == 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go new file mode 100644 index 000000000..7ce52afd5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -0,0 +1,110 @@ +package winapi + +//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile + +//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject +//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject + +const ( + FileLinkInformationClass = 11 + FileDispositionInformationExClass = 64 + + 
FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + OBJ_DONT_REPARSE = 0x1000 + + STATUS_MORE_ENTRIES = 0x105 + STATUS_NO_MORE_ENTRIES = 0x8000001a +) + +// Select entries from FILE_INFO_BY_HANDLE_CLASS. +// +// C declaration: +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class +const ( + FileIdInfo = 18 +) + +type FileDispositionInformationEx struct { + Flags uintptr +} + +type IOStatusBlock struct { + Status, Information uintptr +} + +type ObjectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *UnicodeString + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type ObjectDirectoryInformation struct { + Name UnicodeString + TypeName UnicodeString +} + +type FileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +// C declaration: +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info +type FILE_ID_INFO struct { + VolumeSerialNumber uint64 + FileID [16]byte +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go new file mode 100644 index 000000000..4e609cbf1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go @@ -0,0 +1,3 @@ +package winapi + +//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go new file mode 100644 index 000000000..ba12b1ad9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -0,0 +1,215 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// Messages that can be received from an assigned io completion port. 
+// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +const ( + JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 + JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 + JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 + JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 + JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 + JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 + JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 + JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 + JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 + JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 +) + +// Access rights for creating or opening job objects. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights +const JOB_OBJECT_ALL_ACCESS = 0x1F001F + +// IO limit flags +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 + +const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +const ( + JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota + JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY + JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE +) + +// JobObjectInformationClass values. Used for a call to QueryInformationJobObject +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject +const ( + JobObjectBasicAccountingInformation uint32 = 1 + JobObjectBasicProcessIdList uint32 = 3 + JobObjectBasicAndIoAccountingInformation uint32 = 8 + JobObjectLimitViolationInformation uint32 = 13 + JobObjectMemoryUsageInformation uint32 = 28 + JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectIoAttribution uint32 = 42 +) + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { + ControlFlags uint32 + Value uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { + MaxIops int64 + MaxBandwidth int64 + ReservationIops int64 + BaseIOSize uint32 + VolumeName string + ControlFlags uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list +type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { + NumberOfAssignedProcesses uint32 + NumberOfProcessIdsInList uint32 + ProcessIdList [1]uintptr +} + +// AllPids returns all the process Ids in the job object. 
+func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information +type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { + TotalUserTime int64 + TotalKernelTime int64 + ThisPeriodTotalUserTime int64 + ThisPeriodTotalKernelTime int64 + TotalPageFaultCount uint32 + TotalProcesses uint32 + ActiveProcesses uint32 + TotalTerminateProcesses uint32 +} + +//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { + BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION + IoInfo windows.IO_COUNTERS +} + +// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; +// +type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { + JobMemory uint64 + PeakJobMemoryUsed uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; +// +type JOBOBJECT_IO_ATTRIBUTION_STATS struct { + IoCount uintptr + TotalNonOverlappedQueueTime uint64 + TotalNonOverlappedServiceTime uint64 + TotalSize uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; +// +type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { + ControlFlags uint32 + ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS + WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { + CompletionKey windows.Handle + CompletionPort windows.Handle +} + +// BOOL IsProcessInJob( +// HANDLE ProcessHandle, +// HANDLE JobHandle, +// PBOOL Result +// ); +// +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob + +// BOOL QueryInformationJobObject( +// HANDLE hJob, +// JOBOBJECTINFOCLASS JobObjectInformationClass, +// LPVOID lpJobObjectInformation, +// DWORD cbJobObjectInformationLength, +// LPDWORD lpReturnLength +// ); +// +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject + +// HANDLE OpenJobObjectW( +// DWORD dwDesiredAccess, +// BOOL bInheritHandle, +// LPCWSTR lpName +// ); +// +//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW + +// DWORD SetIoRateControlInformationJobObject( +// HANDLE hJob, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo +// ); +// +//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject + +// DWORD QueryIoRateControlInformationJobObject( +// HANDLE hJob, +// PCWSTR VolumeName, +// 
JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, +// ULONG *InfoBlockCount +// ); +//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject + +// NTSTATUS +// NtOpenJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject + +// NTSTATUS +// NTAPI +// NtCreateJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go new file mode 100644 index 000000000..b6e7cfd46 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go @@ -0,0 +1,30 @@ +package winapi + +// BOOL LogonUserA( +// LPCWSTR lpszUsername, +// LPCWSTR lpszDomain, +// LPCWSTR lpszPassword, +// DWORD dwLogonType, +// DWORD dwLogonProvider, +// PHANDLE phToken +// ); +// +//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW + +// Logon types +const ( + LOGON32_LOGON_INTERACTIVE uint32 = 2 + LOGON32_LOGON_NETWORK uint32 = 3 + LOGON32_LOGON_BATCH uint32 = 4 + LOGON32_LOGON_SERVICE uint32 = 5 + LOGON32_LOGON_UNLOCK uint32 = 7 + LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 + LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 +) + +// Logon providers +const ( + LOGON32_PROVIDER_DEFAULT uint32 = 0 + LOGON32_PROVIDER_WINNT40 uint32 = 2 + LOGON32_PROVIDER_WINNT50 uint32 = 3 +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go new file mode 100644 index 000000000..53f62948c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -0,0 +1,4 @@ +package winapi + +//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc +//sys LocalFree(ptr uintptr) = kernel32.LocalFree diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go new file mode 100644 index 000000000..f37910024 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go @@ -0,0 +1,3 @@ +package winapi + +//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go new file mode 100644 index 000000000..908920e87 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -0,0 +1,11 @@ +package winapi + +// DWORD SearchPathW( +// LPCWSTR lpPath, +// LPCWSTR lpFileName, +// LPCWSTR lpExtension, +// DWORD nBufferLength, +// LPWSTR lpBuffer, +// LPWSTR *lpFilePart +// ); +//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go new file mode 100644 index 000000000..37839435b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -0,0 +1,8 @@ +package winapi + +const PROCESS_ALL_ACCESS uint32 = 2097151 + +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go new file mode 100644 index 000000000..ce79ac2cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go @@ -0,0 +1,7 @@ +package winapi + +// Get count from all processor groups. +// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups +const ALL_PROCESSOR_GROUPS = 0xFFFF + +//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go new file mode 100644 index 000000000..327f57d7c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -0,0 +1,52 @@ +package winapi + +import "golang.org/x/sys/windows" + +const SystemProcessInformation = 5 + +const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 + +// __kernel_entry NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 // ULONG + NumberOfThreads uint32 // ULONG + WorkingSetPrivateSize int64 // LARGE_INTEGER + HardFaultCount uint32 // ULONG + NumberOfThreadsHighWatermark uint32 // ULONG + CycleTime uint64 // ULONGLONG + CreateTime int64 // LARGE_INTEGER + UserTime int64 // LARGE_INTEGER + KernelTime int64 // LARGE_INTEGER + ImageName UnicodeString // UNICODE_STRING + BasePriority int32 // KPRIORITY + UniqueProcessID windows.Handle // HANDLE + InheritedFromUniqueProcessID windows.Handle // HANDLE + HandleCount uint32 // ULONG + SessionID uint32 // ULONG + UniqueProcessKey *uint32 // ULONG_PTR + PeakVirtualSize uintptr // SIZE_T + VirtualSize uintptr // SIZE_T + PageFaultCount uint32 // ULONG + PeakWorkingSetSize uintptr // SIZE_T + WorkingSetSize uintptr // SIZE_T + QuotaPeakPagedPoolUsage uintptr // SIZE_T + QuotaPagedPoolUsage uintptr // SIZE_T + QuotaPeakNonPagedPoolUsage uintptr // SIZE_T + QuotaNonPagedPoolUsage uintptr // SIZE_T + PagefileUsage uintptr // SIZE_T + PeakPagefileUsage uintptr // SIZE_T + PrivatePageCount uintptr // SIZE_T + ReadOperationCount int64 // LARGE_INTEGER + WriteOperationCount int64 // LARGE_INTEGER + OtherOperationCount int64 // LARGE_INTEGER + ReadTransferCount int64 // LARGE_INTEGER + WriteTransferCount int64 // LARGE_INTEGER + OtherTransferCount int64 // LARGE_INTEGER +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go new file mode 100644 index 000000000..4724713e3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -0,0 +1,12 @@ +package winapi + +// HANDLE CreateRemoteThread( +// HANDLE 
hProcess, +// LPSECURITY_ATTRIBUTES lpThreadAttributes, +// SIZE_T dwStackSize, +// LPTHREAD_START_ROUTINE lpStartAddress, +// LPVOID lpParameter, +// DWORD dwCreationFlags, +// LPDWORD lpThreadId +// ); +//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go new file mode 100644 index 000000000..859b753c2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -0,0 +1,80 @@ +package winapi + +import ( + "errors" + "reflect" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice +// for easier interop with Go APIs +func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) + hdr.Data = uintptr(unsafe.Pointer(buffer)) + hdr.Cap = bufferLength + hdr.Len = bufferLength + + return +} + +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string +type UnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + +//String converts a UnicodeString to a golang string +func (uni UnicodeString) String() string { + // UnicodeString is not guaranteed to be null terminated, therefore + // use the UnicodeString's Length field + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) +} + +// NewUnicodeString allocates a new UnicodeString and copies `s` into +// the buffer of the new UnicodeString. +func NewUnicodeString(s string) (*UnicodeString, error) { + buf, err := windows.UTF16FromString(s) + if err != nil { + return nil, err + } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + + uni := &UnicodeString{ + // The length is in bytes and should not include the trailing null character. + Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), + Buffer: &buf[0], + } + return uni, nil +} + +// ConvertStringSetToSlice is a helper function used to convert the contents of +// `buf` into a string slice. `buf` contains a set of null terminated strings +// with an additional null at the end to indicate the end of the set. +func ConvertStringSetToSlice(buf []byte) ([]string, error) { + var results []string + prev := 0 + for i := range buf { + if buf[i] == 0 { + if prev == i { + // found two null characters in a row, return result + return results, nil + } + results = append(results, string(buf[prev:i])) + prev = i + 1 + } + } + return nil, errors.New("string set malformed: missing null terminator at end of buffer") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go new file mode 100644 index 000000000..1d4ba3c4f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -0,0 +1,5 @@ +// Package winapi contains various low-level bindings to Windows APIs. 
It can +// be thought of as an extension to golang.org/x/sys/windows. +package winapi + +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go new file mode 100644 index 000000000..4eb64b4c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -0,0 +1,360 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package winapi + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtQueryDirectoryObject = 
modntdll.NewProc("NtQueryDirectoryObject") + procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") +) + +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) + } + return +} + +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return 
+} + +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func LocalFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = 
uint32(r0) + return +} + +func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return + } + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) +} + +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + status = uint32(r0) + return +} + +func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { + var _p0 uint32 + if singleEntry { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } else { 
+ _p1 = 0 + } + r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func RtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go index df0e63bbd..891616370 100644 --- a/vendor/github.com/Microsoft/hcsshim/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -1,10 +1,11 @@ package hcsshim import ( + "context" "crypto/sha1" "path/filepath" - "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/wclayer" ) @@ -13,59 +14,59 @@ func layerPath(info *DriverInfo, id string) string { } func ActivateLayer(info DriverInfo, id string) error { - return wclayer.ActivateLayer(layerPath(&info, id)) + return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) } func CreateLayer(info DriverInfo, id, parent string) error { - return wclayer.CreateLayer(layerPath(&info, id), parent) + return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) } // New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) } func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) } func DeactivateLayer(info DriverInfo, id string) error { - return wclayer.DeactivateLayer(layerPath(&info, id)) + return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) } func DestroyLayer(info DriverInfo, id string) error { - return wclayer.DestroyLayer(layerPath(&info, id)) + return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) } // New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) } func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) } func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { - return wclayer.ExportLayer(layerPath(&info, layerId), exportFolderPath, parentLayerPaths) + return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) } func GetLayerMountPath(info DriverInfo, id string) (string, error) { - return wclayer.GetLayerMountPath(layerPath(&info, id)) + return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) } func GetSharedBaseImages() (imageData string, err error) { - return wclayer.GetSharedBaseImages() + return wclayer.GetSharedBaseImages(context.Background()) } func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { - return wclayer.ImportLayer(layerPath(&info, layerID), importFolderPath, parentLayerPaths) + return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) } func LayerExists(info DriverInfo, id string) (bool, error) { - return wclayer.LayerExists(layerPath(&info, id)) + return wclayer.LayerExists(context.Background(), layerPath(&info, id)) } func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { - return wclayer.PrepareLayer(layerPath(&info, layerId), parentLayerPaths) + return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) } func ProcessBaseLayer(path string) error { - return wclayer.ProcessBaseLayer(path) + return wclayer.ProcessBaseLayer(context.Background(), path) } func ProcessUtilityVMImage(path string) error { - return wclayer.ProcessUtilityVMImage(path) + return wclayer.ProcessUtilityVMImage(context.Background(), path) } func UnprepareLayer(info DriverInfo, layerId string) error { - return wclayer.UnprepareLayer(layerPath(&info, layerId)) + return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) } type DriverInfo struct { @@ -76,8 +77,8 @@ type DriverInfo struct { type GUID [16]byte func NameToGuid(name string) (id GUID, err error) { - g, err := wclayer.NameToGuid(name) - return GUID(g), err + g, err := wclayer.NameToGuid(context.Background(), name) + return g.ToWindowsArray(), err } func NewGUID(source string) *GUID { @@ -88,19 +89,19 @@ func NewGUID(source string) *GUID { } func (g *GUID) ToString() string { - return (guid.GUID)(*g).String() + return guid.FromWindowsArray(*g).String() } type LayerReader = wclayer.LayerReader func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { - return wclayer.NewLayerReader(layerPath(&info, layerID), parentLayerPaths) + return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) } type LayerWriter = wclayer.LayerWriter func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { - return wclayer.NewLayerWriter(layerPath(&info, layerID), parentLayerPaths) + return wclayer.NewLayerWriter(context.Background(), layerPath(&info, 
layerID), parentLayerPaths) } type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 000000000..3ab3bcd89 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,50 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + var err error + osv = OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. +func Build() uint16 { + return Get().Build +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 000000000..75dce5d82 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,50 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + + // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + + // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + + // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). + V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). 
+ V21H2Win11 = 22000 +) diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go index ca8acbb7c..3362c6833 100644 --- a/vendor/github.com/Microsoft/hcsshim/process.go +++ b/vendor/github.com/Microsoft/hcsshim/process.go @@ -1,7 +1,9 @@ package hcsshim import ( + "context" "io" + "sync" "time" "github.com/Microsoft/hcsshim/internal/hcs" @@ -9,7 +11,10 @@ import ( // ContainerError is an error encountered in HCS type process struct { - p *hcs.Process + p *hcs.Process + waitOnce sync.Once + waitCh chan struct{} + waitErr error } // Pid returns the process ID of the process within the container. @@ -19,7 +24,14 @@ func (process *process) Pid() int { // Kill signals the process to terminate but does not wait for it to finish terminating. func (process *process) Kill() error { - return convertProcessError(process.p.Kill(), process) + found, err := process.p.Kill(context.Background()) + if err != nil { + return convertProcessError(err, process) + } + if !found { + return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} + } + return nil } // Wait waits for the process to exit. @@ -30,7 +42,21 @@ func (process *process) Wait() error { // WaitTimeout waits for the process to exit or the duration to elapse. It returns // false if timeout occurs. func (process *process) WaitTimeout(timeout time.Duration) error { - return convertProcessError(process.p.WaitTimeout(timeout), process) + process.waitOnce.Do(func() { + process.waitCh = make(chan struct{}) + go func() { + process.waitErr = process.Wait() + close(process.waitCh) + }() + }) + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-t.C: + return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} + case <-process.waitCh: + return process.waitErr + } } // ExitCode returns the exit code of the process. The process must have @@ -45,14 +71,14 @@ func (process *process) ExitCode() (int, error) { // ResizeConsole resizes the console of the process. func (process *process) ResizeConsole(width, height uint16) error { - return convertProcessError(process.p.ResizeConsole(width, height), process) + return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) } // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing // these pipes does not close the underlying pipes; it should be possible to // call this multiple times to get multiple interfaces. func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { - stdin, stdout, stderr, err := process.p.Stdio() + stdin, stdout, stderr, err := process.p.StdioLegacy() if err != nil { err = convertProcessError(err, process) } @@ -62,7 +88,7 @@ func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e // CloseStdin closes the write side of the stdin pipe so that the process is // notified on the read side that there is no more data in stdin. 
func (process *process) CloseStdin() error { - return convertProcessError(process.p.CloseStdin(), process) + return convertProcessError(process.p.CloseStdin(context.Background()), process) } // Close cleans up any state associated with the process but does not kill diff --git a/vendor/github.com/Microsoft/hcsshim/vendor.conf b/vendor/github.com/Microsoft/hcsshim/vendor.conf deleted file mode 100644 index 6e0ed1566..000000000 --- a/vendor/github.com/Microsoft/hcsshim/vendor.conf +++ /dev/null @@ -1,21 +0,0 @@ -github.com/blang/semver v3.1.0 -github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 -github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 -github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 -github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f -github.com/konsorten/go-windows-terminal-sequences v1.0.1 -github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c -github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2 -github.com/Microsoft/opengcs v0.3.9 -github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 -github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88 -github.com/pkg/errors v0.8.1 -github.com/sirupsen/logrus v1.3.0 -github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 -github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c -github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6 -github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b -github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874 -golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 -golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f -golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 788fe6e27..212fe25e7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -15,6 +15,12 @@ type Config struct { Endpoint string SigningRegion string SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overriden based on metadata the + // service has. 
+ SigningNameDerived bool } // ConfigProvider provides a generic way for a service client to receive @@ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) - c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 63d2df67c..a397b0d04 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,12 +1,11 @@ package client import ( - "math/rand" "strconv" - "sync" "time" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) // DefaultRetryer implements basic retry logic using exponential backoff for @@ -31,8 +30,6 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes @@ -53,7 +50,7 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { retryCount = 13 } - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } @@ -65,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { return *r.Retryable } - if r.HTTPResponse.StatusCode >= 500 { + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } return r.IsErrorRetryable() || d.shouldThrottle(r) @@ -117,22 +114,3 @@ func canUseRetryAfterHeader(r *request.Request) bool { return true } - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 1f39c91f2..ce9fb896d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error { return reader.Source.Close() } +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. 
+var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + func logRequest(r *request.Request) { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) return } if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. r.ResetBody() } - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) } const logRespMsg = `DEBUG: Response %s/%s Details: @@ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s: %s -----------------------------------------------------` +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. 
+var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } } handlerFn := func(req *request.Request) { - body, err := httputil.DumpResponse(req.HTTPResponse, false) + b, err := httputil.DumpResponse(req.HTTPResponse, false) if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) return } - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) - if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(string(b)) } } @@ -106,3 +158,27 @@ func logResponse(r *request.Request) { Name: handlerName, Fn: handlerFn, }) } + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. +var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 4778056dd..920e9fddf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -3,6 +3,7 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. type ClientInfo struct { ServiceName string + ServiceID string APIVersion string Endpoint string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 4fd0d0724..5421b5d4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -151,6 +151,15 @@ type Config struct { // with accelerate. S3UseAccelerate *bool + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. 
This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { func (c *Config) WithS3UseAccelerate(enable bool) *Config { c.S3UseAccelerate = &enable return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + } // WithUseDualStack sets a config UseDualStack value returning a Config @@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 495e3ef62..cfcddf3dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -3,12 +3,10 @@ package corehandlers import ( "bytes" "fmt" - "io" "io/ioutil" "net/http" "net/url" "regexp" - "runtime" "strconv" "time" @@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } } } @@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen } }} -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 000000000..a15f496bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec_env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 42416fc2f..a270844df 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -158,13 +158,14 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now } - return e.expiration.Before(e.CurrentTime()) + return e.expiration.Before(curTime()) } -// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // @@ -178,7 +179,8 @@ func (e *Expiry) IsExpired() bool { type Credentials struct { creds Value forceRefresh bool - m sync.Mutex + + m sync.RWMutex provider Provider } @@ -201,6 +203,17 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. 
c.m.Lock() defer c.m.Unlock() @@ -234,8 +247,8 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() + c.m.RLock() + defer c.m.RUnlock() return c.isExpired() } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index c39749524..0ed791be6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -4,7 +4,6 @@ import ( "bufio" "encoding/json" "fmt" - "path" "strings" "time" @@ -12,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // ProviderName provides a name of EC2Role provider @@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct { Message string } -const iamSecurityCredsPath = "/iam/security-credentials" +const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. // If there are no credentials, or there is an error making or receiving the request @@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // If the credentials cannot be found, or there is an error reading the response // and error will be returned. func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 000000000..152d785b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,46 @@ +// Package csm provides Client Side Monitoring (CSM) which enables sending metrics +// via UDP connection. Using the Start function will enable the reporting of +// metrics on a given port. If Start is called, with different parameters, again, +// a panic will occur. +// +// Pause can be called to pause any metrics publishing on a given port. Sessions +// that have had their handlers modified via InjectHandlers may still be used. +// However, the handlers will act as a no-op meaning no metrics will be published. +// +// Example: +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// r.InjectHandlers(&sess.Handlers) +// +// client := s3.New(sess) +// resp, err := client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +// +// Start returns a Reporter that is used to enable or disable monitoring. 
If +// access to the Reporter is required later, calling Get will return the Reporter +// singleton. +// +// Example: +// r := csm.Get() +// r.Continue() +package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 000000000..2f0c6eac9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,67 @@ +package csm + +import ( + "fmt" + "sync" +) + +var ( + lock sync.Mutex +) + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// Start will start the a long running go routine to capture +// client side metrics. Calling start multiple time will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// Example: +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. 
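A short sketch of how Get is meant to be used once Start has run somewhere in the process (the client ID and UDP address below are placeholders): later code can fetch the singleton and inject the metric handlers without re-plumbing the Start parameters.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Placeholder client ID and address; Start only succeeds once per process
	// and panics if called again with different parameters.
	if _, err := csm.Start("clientID", "127.0.0.1:31000"); err != nil {
		fmt.Println("CSM not started:", err)
	}

	sess := session.Must(session.NewSession())

	// Get returns nil when Start never completed successfully, so guard it.
	if r := csm.Get(); r != nil {
		r.InjectHandlers(&sess.Handlers)
	}
}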
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 000000000..4b0d630e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,51 @@ +package csm + +import ( + "strconv" + "time" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 000000000..514fc3739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 000000000..11082e5ed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,231 @@ +package csm + 
+import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + setError(&m, awserr) + } + } + + rep.metricsCh.Push(m) +} + +func setError(m *metric, err awserr.Error) { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + m.SDKException = &code + m.SDKExceptionMessage = &msg + default: + m.AWSException = &code + m.AWSExceptionMessage = &msg + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + } + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from +// being added. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring +// to be resumed. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// InjectHandlers will will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// Example: +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} + apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} + + handlers.Complete.PushFrontNamed(apiCallHandler) + handlers.Complete.PushFrontNamed(apiCallAttemptHandler) + + handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 2cb08182f..5040a2f64 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -73,6 +73,7 @@ func Handlers() request.Handlers { handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) @@ -91,14 +92,25 @@ func Handlers() request.Handlers { func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { return credentials.NewCredentials(&credentials.ChainProvider{ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - }, + Providers: CredProviders(cfg, handlers), }) } +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// For applications that need to use some other provider (for example use +// different environment variables for legacy reasons) but still fall back +// on the default chain of providers. 
This allows that default chaint to be +// automatically updated +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + const ( httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 984407a58..c215cd3f5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,12 +4,12 @@ import ( "encoding/json" "fmt" "net/http" - "path" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // GetMetadata uses the path provided to request information from the EC2 @@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), + HTTPPath: sdkuri.PathJoin("/meta-data", p), } output := &metadataOutput{} @@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "user-data"), + HTTPPath: "/user-data", } output := &metadataOutput{} @@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), + HTTPPath: sdkuri.PathJoin("/dynamic", p), } output := &metadataOutput{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 5b4379dbd..ef5f73292 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -1,5 +1,10 @@ // Package ec2metadata provides the client for making API calls to the // EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environemnt variable is set to true, (case insensitive). package ec2metadata import ( @@ -7,17 +12,21 @@ import ( "errors" "io" "net/http" + "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" ) // ServiceName is the name of the service. const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" // A EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) + // Disable the EC2 Metadata service if the environment variable is set. + // This shortcirctes the service's functionality to always fail to send + // requests. 
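The effect of the new environment-variable switch, as a sketch (the "instance-id" path and the Setenv call are illustrative): with AWS_EC2_METADATA_DISABLED=true set before the client is built, every call fails immediately with a RequestCanceled error instead of dialing IMDS.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// The variable is read when the client is constructed, so set it first.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// The swapped Send handler short-circuits with request.CanceledErrorCode.
	if _, err := svc.GetMetadata("instance-id"); err != nil {
		fmt.Println("IMDS disabled:", err)
	}
}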
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + // Add additional options to the service config for _, option := range opts { option(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 74f72de07..c04ba06c5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -84,6 +84,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol custAddEC2Metadata(p) custAddS3DualStack(p) custRmIotDataService(p) + custFixAppAutoscalingChina(p) } return ps, nil @@ -122,6 +123,27 @@ func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + type decodeModelError struct { awsError } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 56f08e386..828bb3a44 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -45,15 +45,21 @@ const ( // Service identifiers const ( + A4bServiceID = "a4b" // A4b. AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. ApiPricingServiceID = "api.pricing" // ApiPricing. ApigatewayServiceID = "apigateway" // Apigateway. ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. Appstream2ServiceID = "appstream2" // Appstream2. AthenaServiceID = "athena" // Athena. AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. BatchServiceID = "batch" // Batch. BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + Cloud9ServiceID = "cloud9" // Cloud9. ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. CloudformationServiceID = "cloudformation" // Cloudformation. CloudfrontServiceID = "cloudfront" // Cloudfront. @@ -69,6 +75,7 @@ const ( CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. CognitoIdpServiceID = "cognito-idp" // CognitoIdp. CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. ConfigServiceID = "config" // Config. CurServiceID = "cur" // Cur. DatapipelineServiceID = "datapipeline" // Datapipeline. @@ -94,10 +101,12 @@ const ( EsServiceID = "es" // Es. EventsServiceID = "events" // Events. FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. GameliftServiceID = "gamelift" // Gamelift. GlacierServiceID = "glacier" // Glacier. GlueServiceID = "glue" // Glue. GreengrassServiceID = "greengrass" // Greengrass. 
+ GuarddutyServiceID = "guardduty" // Guardduty. HealthServiceID = "health" // Health. IamServiceID = "iam" // Iam. ImportexportServiceID = "importexport" // Importexport. @@ -105,18 +114,24 @@ const ( IotServiceID = "iot" // Iot. KinesisServiceID = "kinesis" // Kinesis. KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. KmsServiceID = "kms" // Kms. LambdaServiceID = "lambda" // Lambda. LightsailServiceID = "lightsail" // Lightsail. LogsServiceID = "logs" // Logs. MachinelearningServiceID = "machinelearning" // Machinelearning. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. MghServiceID = "mgh" // Mgh. MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. ModelsLexServiceID = "models.lex" // ModelsLex. MonitoringServiceID = "monitoring" // Monitoring. MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. OpsworksServiceID = "opsworks" // Opsworks. OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. OrganizationsServiceID = "organizations" // Organizations. @@ -125,12 +140,18 @@ const ( RdsServiceID = "rds" // Rds. RedshiftServiceID = "redshift" // Redshift. RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. Route53ServiceID = "route53" // Route53. Route53domainsServiceID = "route53domains" // Route53domains. RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. S3ServiceID = "s3" // S3. + SagemakerServiceID = "sagemaker" // Sagemaker. SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. ShieldServiceID = "shield" // Shield. SmsServiceID = "sms" // Sms. SnowballServiceID = "snowball" // Snowball. @@ -144,9 +165,11 @@ const ( SupportServiceID = "support" // Support. SwfServiceID = "swf" // Swf. TaggingServiceID = "tagging" // Tagging. + TranslateServiceID = "translate" // Translate. WafServiceID = "waf" // Waf. WafRegionalServiceID = "waf-regional" // WafRegional. WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. WorkspacesServiceID = "workspaces" // Workspaces. XrayServiceID = "xray" // Xray. 
) @@ -244,6 +267,12 @@ var awsPartition = partition{ }, }, Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -264,6 +293,33 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "api.pricing": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -339,10 +395,13 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -370,17 +429,38 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -397,11 +477,35 @@ var awsPartition = partition{ }, }, }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "clouddirectory": service{ Endpoints: endpoints{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -459,7 +563,11 @@ var awsPartition = partition{ }, }, "cloudhsmv2": service{ - + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, @@ -468,6 +576,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -514,16 +623,43 @@ var awsPartition = partition{ Endpoints: 
endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "codecommit": service{ @@ -538,6 +674,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -577,6 +714,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -588,6 +726,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -648,6 +787,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -689,9 +839,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -740,6 +893,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -881,11 +1035,17 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ @@ -911,11 +1071,14 @@ var awsPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": 
endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1043,13 +1206,32 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "gamelift": service{ Endpoints: endpoints{ @@ -1093,10 +1275,17 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1108,10 +1297,34 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "health": service{ Endpoints: endpoints{ @@ -1156,6 +1369,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1169,6 +1383,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -1202,9 +1417,20 @@ var awsPartition = partition{ "kinesisanalytics": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "kms": service{ @@ -1251,12 +1477,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, 
"eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1295,6 +1524,66 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -1311,6 +1600,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1337,7 +1627,9 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "monitoring": service{ @@ -1372,6 +1664,35 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -1380,6 +1701,7 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -1394,9 +1716,15 @@ var awsPartition = partition{ "opsworks-cm": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, 
+ "us-west-2": endpoint{}, }, }, "organizations": service{ @@ -1464,7 +1792,38 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "redshift": service{ + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1484,14 +1843,6 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "rekognition": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, "route53": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1520,6 +1871,20 @@ var awsPartition = partition{ Endpoints: endpoints{ "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "s3": service{ @@ -1581,6 +1946,19 @@ var awsPartition = partition{ }, }, }, + "sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1599,6 +1977,74 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: 
[]string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, "servicecatalog": service{ Endpoints: endpoints{ @@ -1619,6 +2065,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ @@ -1635,10 +2091,14 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1650,7 +2110,9 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -1700,7 +2162,31 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -1733,12 +2219,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1888,6 +2379,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1895,6 +2387,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1912,8 +2415,11 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ 
-1929,15 +2435,28 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1996,12 +2515,13 @@ var awscnPartition = partition{ "apigateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", + Hostname: "autoscaling.{region}.amazonaws.com.cn", Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "application-autoscaling", @@ -2062,6 +2582,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "dynamodb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -2094,13 +2621,15 @@ var awscnPartition = partition{ "ecr": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ecs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticache": service{ @@ -2190,7 +2719,8 @@ var awscnPartition = partition{ "lambda": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "logs": service{ @@ -2233,6 +2763,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ @@ -2300,7 +2837,8 @@ var awscnPartition = partition{ "tagging": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, }, @@ -2364,6 +2902,16 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "cloudtrail": service{ Endpoints: endpoints{ @@ -2423,9 +2971,27 @@ var awsusgovPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -2451,6 +3017,12 @@ var awsusgovPartition = partition{ }, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ @@ -2478,6 +3050,12 @@ var awsusgovPartition = partition{ }, }, }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ 
-2502,12 +3080,28 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "monitoring": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -2578,6 +3172,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2606,5 +3212,19 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c3eedb48..e29c09512 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { rs := map[string]Region{} - for id := range p.p.Regions { + for id, r := range p.p.Regions { rs[id] = Region{ - id: id, - p: p.p, + id: id, + desc: r.Description, + p: p.p, } } @@ -240,6 +241,10 @@ type Region struct { // ID returns the region's identifier. func (r Region) ID() string { return r.id } +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + // ResolveEndpoint resolves an endpoint from the context of the region given // a service. See Partition.EndpointFor for usage and errors that can be returned. func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve func (s Service) Regions() map[string]Region { rs := map[string]Region{} for id := range s.p.Services[s.id].Endpoints { - if _, ok := s.p.Regions[id]; ok { + if r, ok := s.p.Regions[id]; ok { rs[id] = Region{ - id: id, - p: s.p, + id: id, + desc: r.Description, + p: s.p, } } } @@ -347,6 +353,10 @@ type ResolvedEndpoint struct { // The service name that should be used for signing requests. SigningName string + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + // The signing method that should be used for signing requests. 
SigningMethod string } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 13d968a24..ff6f76db6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op if len(signingRegion) == 0 { signingRegion = region } + signingName := e.CredentialScope.Service + var signingNameDerived bool if len(signingName) == 0 { signingName = service + signingNameDerived = true } return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index 3babb5abd..6ed15b2ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -71,6 +71,12 @@ const ( // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody ) // A Logger is a minimalistic interface for the SDK to log messages to. Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 802ac88ad..605a72d3c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -14,6 +14,7 @@ type Handlers struct { Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList + UnmarshalStream HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList @@ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers { Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), @@ -45,6 +47,7 @@ func (h *Handlers) Clear() { h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() + h.UnmarshalStream.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() @@ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { return swapped } +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + // SetBackNamed will replace the named handler if it exists in the handler list. // If the handler does not exist the handler will be added to the end of the list. 
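The endpoints changes above surface the region description on Region and record, via SigningNameDerived, whether the signing name was derived from the service id rather than modeled explicitly. A minimal sketch of how both show up to a caller, assuming the generated endpoints.AwsPartition() accessor; the s3/us-west-2 pair is used purely as an illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	p := endpoints.AwsPartition()

	// Region descriptions are now carried through Partition.Regions().
	for id, r := range p.Regions() {
		fmt.Printf("%s: %s\n", id, r.Description())
	}

	// SigningNameDerived reports that the signing name fell back to the
	// service id because the endpoint model did not set one explicitly.
	re, err := p.EndpointFor("s3", "us-west-2")
	if err != nil {
		panic(err)
	}
	fmt.Println(re.URL, re.SigningName, re.SigningNameDerived)
}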
func (l *HandlerList) SetBackNamed(n NamedHandler) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index 02f07f4a4..b0c2ef4fe 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -3,6 +3,8 @@ package request import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing @@ -15,7 +17,7 @@ type offsetReader struct { func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - buf.Seek(offset, 0) + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf return reader diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 5c7db4982..75f0fe077 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -45,6 +46,7 @@ type Request struct { Handlers Handlers Retryer + AttemptTime time.Time Time time.Time Operation *Operation HTTPRequest *http.Request @@ -120,6 +122,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, Handlers: handlers.Copy(), Retryer: retryer, + AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -224,6 +227,9 @@ func (r *Request) SetContext(ctx aws.Context) { // WillRetry returns if the request's can be retried. func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } @@ -255,6 +261,7 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. r.ResetBody() } @@ -292,6 +299,11 @@ func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, err return getPresignedURL(r, expire) } +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { if expire <= 0 { return "", nil, awserr.New( @@ -332,7 +344,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run +// Any additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have @@ -358,9 +370,9 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request returning error if errors are encountered. +// Sign will sign the request, returning error if errors are encountered. // -// Send will build the request prior to signing. All Sign Handlers will +// Sign will build the request prior to signing. 
All Sign Handlers will // be executed in the order they were set. func (r *Request) Sign() error { r.Build() @@ -393,7 +405,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // of the SDK if they used that field. // // Related golang/go#18257 - l, err := computeBodyLength(r.Body) + l, err := aws.SeekerLen(r.Body) if err != nil { return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } @@ -411,7 +423,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Transfer-Encoding: chunked bodies for these methods. // // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker. + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": body = NoBody @@ -423,49 +436,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { return body, nil } -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. - switch v := r.(type) { - case aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - case *aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - // GetBody will return an io.ReadSeeker of the Request's underlying // input body with a concurrency safe wrapper. func (r *Request) GetBody() io.ReadSeeker { return r.safeBody } -// Send will send the request returning error if errors are encountered. +// Send will send the request, returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. @@ -486,6 +463,7 @@ func (r *Request) Send() error { }() for { + r.AttemptTime = time.Now() if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 869b97a1a..e36e468b7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } var NoBody = noBody{} // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. 
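Request.IsPresigned() above distinguishes presigned requests from ones that will actually be sent, and the reworked body-length logic relies on the body being seekable. A hedged sketch of presigning a request; S3 is chosen only to make the example concrete, and the bucket and key names are illustrative assumptions, not anything from this patch:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3.New(sess)

	// Build the request without sending it so it can be presigned.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
	})

	url, err := req.Presign(15 * time.Minute) // sets ExpireTime, so IsPresigned() reports true
	if err != nil {
		panic(err)
	}
	fmt.Println(req.IsPresigned(), url)
}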
// // ResetBody will automatically be called by the SDK's build handler, but if diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index c32fc69bc..7c6a8000f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -11,7 +11,7 @@ import ( var NoBody = http.NoBody // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 59de6736b..a633ed5ac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -35,8 +35,12 @@ type Pagination struct { // NewRequest should always be built from the same API operations. It is // undefined if different API operations are returned on subsequent calls. NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool started bool + prevTokens []interface{} nextTokens []interface{} err error @@ -49,7 +53,15 @@ type Pagination struct { // // Will always return true if Next has not been called yet. func (p *Pagination) HasNextPage() bool { - return !(p.started && len(p.nextTokens) == 0) + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage } // Err returns the error Pagination encountered when retrieving the next page. 
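HasNextPage() combined with the new EndPageOnSameToken flag stops a paginator that keeps receiving the same continuation token instead of looping on that page forever. A sketch of the Pagination pattern with the flag enabled; the S3 ListObjects call and bucket name are assumptions used only to make the loop concrete:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	p := request.Pagination{
		// Stop if the service keeps returning the same token as the
		// previous page, rather than requesting that page again.
		EndPageOnSameToken: true,
		NewRequest: func() (*request.Request, error) {
			req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
				Bucket: aws.String("example-bucket"), // hypothetical bucket
			})
			return req, nil
		},
	}

	for p.Next() {
		page := p.Page().(*s3.ListObjectsOutput)
		fmt.Println("objects on page:", len(page.Contents))
	}
	if err := p.Err(); err != nil {
		panic(err)
	}
}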
@@ -96,6 +108,7 @@ func (p *Pagination) Next() bool { return false } + p.prevTokens = p.nextTokens p.nextTokens = req.nextPageTokens() p.curPage = req.Data @@ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index f35fef213..7d5270298 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { } if t, ok := err.(temporaryError); ok { - return t.Temporary() + return t.Temporary() || isErrConnectionReset(err) } return isErrConnectionReset(err) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index ea7b886f8..98d420fd6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). Credentials are the values the SDK should use for authenticating requests with -AWS Services. They arfrom a configuration file will need to include both +AWS Services. They are from a configuration file will need to include both aws_access_key_id and aws_secret_access_key must be provided together in the same file to be considered valid. The values will be ignored if not a complete group. aws_session_token is an optional field that can be provided if both of diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index f1adcf481..82e04d76c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -5,6 +5,7 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" ) // EnvProviderName provides a name of the provider when config is loaded from environment. 
@@ -95,9 +96,23 @@ type envConfig struct { // // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string } var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -156,6 +171,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} @@ -176,6 +197,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 9f75d5ac5..51f305563 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" @@ -26,7 +27,7 @@ import ( // Sessions are safe to create service clients concurrently, but it is not safe // to mutate the Session concurrently. // -// The Session satisfies the service client's client.ClientConfigProvider. +// The Session satisfies the service client's client.ConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers @@ -58,7 +59,12 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(Options{}, envCfg, cfgs...) + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -76,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session { r.Error = err }) } + return s } - return deprecatedNewSession(cfgs...) + s := deprecatedNewSession(cfgs...) 
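The env_config and session hunks here wire client-side metrics (CSM) to environment variables and make session.New route through NewSessionWithOptions with shared config enabled. A sketch of driving both from application code; the port, client ID, and region values are illustrative, not defaults taken from this patch:

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// CSM is driven purely by environment variables read at session creation.
	os.Setenv("AWS_CSM_ENABLED", "true")
	os.Setenv("AWS_CSM_PORT", "31000")        // illustrative port
	os.Setenv("AWS_CSM_CLIENT_ID", "example") // illustrative client id

	// Equivalent to what session.New now does when AWS_SDK_LOAD_CONFIG is set:
	// shared config enabled, user config merged in.
	sess, err := session.NewSessionWithOptions(session.Options{
		Config:            aws.Config{Region: aws.String("us-west-2")},
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		panic(err)
	}
	_ = sess // clients built from sess report metrics to the local CSM agent
}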
+ if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + return s } // NewSession returns a new Session created from SDK defaults, config files, @@ -243,13 +255,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - if len(envCfg.SharedCredentialsFile) == 0 { - envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(envCfg.SharedConfigFile) == 0 { - envCfg.SharedConfigFile = defaults.SharedConfigFilename() - } - // Only use AWS_CA_BUNDLE if session option is not provided. if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { f, err := os.Open(envCfg.CustomCABundle) @@ -302,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { } initHandlers(s) - return s } +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort + } + + r, err := csm.Start(clientID, "127.0.0.1:"+port) + if err != nil { + return + } + r.InjectHandlers(handlers) +} + func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -345,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } // Setup HTTP client with custom cert bundle if enabled if opts.CustomCABundle != nil { @@ -573,11 +593,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, }, err } @@ -597,10 +618,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index ccc88b4ac..8aa0681d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -71,6 +71,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" "github.com/aws/aws-sdk-go/private/protocol/rest" ) @@ -97,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - 
"X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -134,6 +135,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, }, }, patterns{"X-Amz-Meta-"}, @@ -341,7 +343,9 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } // If the request is not presigned the body should be attached to it. 
This // prevents the confusion of wanting to send a signed request without @@ -503,11 +507,13 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { v4.Logger.Log(msg) } -func (ctx *signingCtx) build(disableHeaderHoisting bool) { +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends - ctx.buildBodyDigest() + if err := ctx.buildBodyDigest(); err != nil { + return err + } unsignedHeaders := ctx.Request.Header if ctx.isPresign { @@ -535,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } + + return nil } func (ctx *signingCtx) buildTime() { @@ -661,21 +669,34 @@ func (ctx *signingCtx) buildSignature() { ctx.signature = hex.EncodeToString(signature) } -func (ctx *signingCtx) buildBodyDigest() { +func (ctx *signingCtx) buildBodyDigest() error { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign } else if ctx.Body == nil { hash = emptyStringSHA256 } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + + if includeSHA256Header { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } ctx.bodyDigest = hash + + return nil } // isRequestSigned returns if the request is currently signed or presigned @@ -715,8 +736,8 @@ func makeSha256(data []byte) []byte { func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) io.Copy(hash, reader) return hash.Sum(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 0e2d864e1..8b6f23425 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -3,6 +3,8 @@ package aws import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should @@ -22,6 +24,22 @@ type ReaderSeekerCloser struct { r io.Reader } +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // @@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool { return ok } +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. 
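buildBodyDigest now refuses unseekable bodies, and makeSha256Reader restores the reader's offset after hashing so the same body can still be sent. A local, stdlib-only sketch of that hash-then-rewind step (using io.SeekCurrent/io.SeekStart directly rather than the sdkio shim); this is an illustration, not the SDK's exported API:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// sha256OfSeeker hashes the body from its current offset, then seeks back to
// where it started so the reader remains usable for the actual HTTP send.
func sha256OfSeeker(r io.ReadSeeker) (string, error) {
	start, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		return "", err
	}
	defer r.Seek(start, io.SeekStart)

	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	body := bytes.NewReader([]byte("hello"))
	digest, _ := sha256OfSeeker(body)
	fmt.Println(digest) // the kind of value placed in X-Amz-Content-Sha256
}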
+func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + // Close closes the ReaderSeekerCloser. // // If the ReaderSeekerCloser is not an io.Closer nothing will be done. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 87e15b64b..668171d36 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.54" +const SDKVersion = "1.15.11" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 000000000..5aa9137e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. 
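The new aws.IsReaderSeekable and aws.SeekerLen helpers above replace the old computeBodyLength logic. A short example of what they report for a seekable reader versus an opaque one wrapped with aws.ReadSeekCloser:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// bytes.Reader is a Seeker (and has Len), so the SDK can size the body.
	seekable := aws.ReadSeekCloser(bytes.NewReader([]byte("hello")))
	fmt.Println(aws.IsReaderSeekable(seekable)) // true
	n, _ := aws.SeekerLen(seekable)
	fmt.Println(n) // 5

	// io.LimitedReader is neither a Seeker nor a Lenner, so the length is
	// unknown (-1); such a body cannot be retried or used for a signed request.
	opaque := aws.ReadSeekCloser(io.LimitReader(bytes.NewReader([]byte("hello")), 5))
	fmt.Println(aws.IsReaderSeekable(opaque)) // false
	n, _ = aws.SeekerLen(opaque)
	fmt.Println(n) // -1
}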
+const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 000000000..e5f005613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 000000000..0c9802d87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 000000000..38ea61afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go index eedc5bd7d..3104e6ce4 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -24,7 +24,7 @@ func Build(r *request.Request) { r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) } - if r.ExpireTime == 0 { + if !r.IsPresigned() { r.HTTPRequest.Method = "POST" r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") r.SetBufferBody([]byte(body.Encode())) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 000000000..e21614a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. 
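sdkuri.PathJoin preserves a trailing "/" that path.Join would clean away, which presumably matters for URI paths where the trailing slash is significant (metadata-style listings, for example). Since sdkuri is an internal package that cannot be imported from outside the SDK, the sketch below re-derives the same behavior locally; the helper name and sample paths are illustrative:

package main

import (
	"fmt"
	"path"
	"strings"
)

// joinKeepingTrailingSlash mirrors the PathJoin hunk above: join the elements,
// then restore the trailing "/" that path.Join's cleaning removed.
func joinKeepingTrailingSlash(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}
	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
	joined := path.Join(elems...)
	if hasTrailing && joined != "/" {
		joined += "/"
	}
	return joined
}

func main() {
	fmt.Println(path.Join("/latest/meta-data", "iam/"))                // /latest/meta-data/iam
	fmt.Println(joinKeepingTrailingSlash("/latest/meta-data", "iam/")) // /latest/meta-data/iam/
}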
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "GET"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 18169f0f8..60e5b09d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -25,7 +25,7 @@ func Build(r *request.Request) { return } - if r.ExpireTime == 0 { + if !r.IsPresigned() { r.HTTPRequest.Method = "POST" r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") r.SetBufferBody([]byte(body.Encode())) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 5ce9cba32..75866d012 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -233,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index c405288d7..b34f5258a 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -20,9 +20,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -270,7 +267,14 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) case float64: str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: - str = value.UTC().Format(RFC822) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) case aws.JSONValue: if len(value) == 0 { return "", errValueNotSet diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 823f045ee..33fd53b12 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -198,7 +198,11 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&f)) case *time.Time: - t, err := time.Parse(RFC822, header) + format := tag.Get("timestampFormat") + if 
len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 000000000..b7ed6c6f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +const ( + // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is know to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822TimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601TimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), 0), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 7091b456d..1bfe45f65 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -13,9 +13,13 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. 
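The new timestamp.go centralizes the three wire formats (rfc822, iso8601, unixTimestamp) selected by the timestampFormat struct tag across the query, rest, and xmlutil marshalers. A stdlib-only sketch that round-trips one timestamp through the same layouts; the layout constants are copied from the hunk above:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	const (
		rfc822  = "Mon, 2 Jan 2006 15:04:05 GMT"
		iso8601 = "2006-01-02T15:04:05Z"
	)

	// A header timestamp (rfc822) re-expressed in the other two wire formats.
	t, err := time.Parse(rfc822, "Tue, 29 Apr 2014 18:30:38 GMT")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC().Format(iso8601))         // 2014-04-29T18:30:38Z
	fmt.Println(strconv.FormatInt(t.Unix(), 10)) // 1398796238
}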
func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { b := xmlBuilder{encoder: e, namespaces: map[string]string{}} root := NewXMLElement(xml.Name{}) if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { @@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error { } for _, c := range root.Children { for _, v := range c { - return StructToXML(e, v, false) + return StructToXML(e, v, sorted) } } return nil @@ -90,8 +94,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl return nil } - fieldAdded := false - // unwrap payloads if payload := tag.Get("payload"); payload != "" { field, _ := value.Type().FieldByName(payload) @@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl child.Attr = append(child.Attr, ns) } + var payloadFields, nonPayloadFields int + t := value.Type() for i := 0; i < value.NumField(); i++ { member := elemOf(value.Field(i)) @@ -133,8 +137,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ continue } + payloadFields++ if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() @@ -149,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl if err := b.buildValue(member, child, mTag); err != nil { return err } - - fieldAdded = true } - if fieldAdded { // only append this child if we have one ore more valid members + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { current.AddChild(child) } @@ -278,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case float32: str = strconv.FormatFloat(float64(converted), 'f', -1, 32) case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", tag.Get("locationName"), value.Interface(), value.Type().Name()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 87584628a..ff1ef6830 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalXML deserializes an xml.Decoder into the container v. 
V @@ -52,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { if t == "" { switch rtype.Kind() { case reflect.Struct: - t = "structure" + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } case reflect.Slice: - t = "list" + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } case reflect.Map: t = "map" } @@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3e970b629..515ce1521 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode { // AddChild adds child to the XMLNode. func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n if _, ok := n.Children[child.Name.Local]; !ok { n.Children[child.Name.Local] = []*XMLNode{} } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 4598ccd82..69d37022a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -17,7 +17,7 @@ const opAcceptReservedInstancesExchangeQuote = "AcceptReservedInstancesExchangeQ // AcceptReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the // client's request for the AcceptReservedInstancesExchangeQuote operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -92,7 +92,7 @@ const opAcceptVpcEndpointConnections = "AcceptVpcEndpointConnections" // AcceptVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the // client's request for the AcceptVpcEndpointConnections operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -167,7 +167,7 @@ const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" // AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the AcceptVpcPeeringConnection operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -247,7 +247,7 @@ const opAllocateAddress = "AllocateAddress" // AllocateAddressRequest generates a "aws/request.Request" representing the // client's request for the AllocateAddress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -287,17 +287,21 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // AllocateAddress API operation for Amazon Elastic Compute Cloud. // -// Allocates an Elastic IP address. +// Allocates an Elastic IP address to your AWS account. After you allocate the +// Elastic IP address you can associate it with an instance or network interface. +// After you release an Elastic IP address, it is released to the IP address +// pool and can be allocated to a different AWS account. +// +// [EC2-VPC] If you release an Elastic IP address, you might be able to recover +// it. You cannot recover an Elastic IP address that you released after it is +// allocated to another AWS account. You cannot recover an Elastic IP address +// for EC2-Classic. To attempt to recover an Elastic IP address that you released, +// specify it in this operation. // // An Elastic IP address is for use either in the EC2-Classic platform or in // a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic // per region and 5 Elastic IP addresses for EC2-VPC per region. // -// If you release an Elastic IP address for use in a VPC, you might be able -// to recover it. To recover an Elastic IP address that you released, specify -// it in the Address parameter. Note that you cannot recover an Elastic IP address -// that you released after it is allocated to another AWS account. -// // For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -333,7 +337,7 @@ const opAllocateHosts = "AllocateHosts" // AllocateHostsRequest generates a "aws/request.Request" representing the // client's request for the AllocateHosts operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -373,9 +377,8 @@ func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Requ // AllocateHosts API operation for Amazon Elastic Compute Cloud. // -// Allocates a Dedicated Host to your account. At minimum you need to specify -// the instance size type, Availability Zone, and quantity of hosts you want -// to allocate. +// Allocates a Dedicated Host to your account. At a minimum, specify the instance +// size type, Availability Zone, and quantity of hosts to allocate. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -409,7 +412,7 @@ const opAssignIpv6Addresses = "AssignIpv6Addresses" // AssignIpv6AddressesRequest generates a "aws/request.Request" representing the // client's request for the AssignIpv6Addresses operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -490,7 +493,7 @@ const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" // AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the // client's request for the AssignPrivateIpAddresses operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -576,7 +579,7 @@ const opAssociateAddress = "AssociateAddress" // AssociateAddressRequest generates a "aws/request.Request" representing the // client's request for the AssociateAddress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -617,6 +620,7 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // AssociateAddress API operation for Amazon Elastic Compute Cloud. // // Associates an Elastic IP address with an instance or a network interface. +// Before you can use an Elastic IP address, you must allocate it to your account. // // An Elastic IP address is for use in either the EC2-Classic platform or in // a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) @@ -673,7 +677,7 @@ const opAssociateDhcpOptions = "AssociateDhcpOptions" // AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the AssociateDhcpOptions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -760,7 +764,7 @@ const opAssociateIamInstanceProfile = "AssociateIamInstanceProfile" // AssociateIamInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the AssociateIamInstanceProfile operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -835,7 +839,7 @@ const opAssociateRouteTable = "AssociateRouteTable" // AssociateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the AssociateRouteTable operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -881,7 +885,7 @@ func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req * // an association ID, which you need in order to disassociate the route table // from the subnet later. A route table can be associated with multiple subnets. // -// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -916,7 +920,7 @@ const opAssociateSubnetCidrBlock = "AssociateSubnetCidrBlock" // AssociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the AssociateSubnetCidrBlock operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -992,7 +996,7 @@ const opAssociateVpcCidrBlock = "AssociateVpcCidrBlock" // AssociateVpcCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the AssociateVpcCidrBlock operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1072,7 +1076,7 @@ const opAttachClassicLinkVpc = "AttachClassicLinkVpc" // AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the // client's request for the AttachClassicLinkVpc operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1157,7 +1161,7 @@ const opAttachInternetGateway = "AttachInternetGateway" // AttachInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the AttachInternetGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1199,8 +1203,8 @@ func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (r // AttachInternetGateway API operation for Amazon Elastic Compute Cloud. // -// Attaches an Internet gateway to a VPC, enabling connectivity between the -// Internet and the VPC. For more information about your VPC and Internet gateway, +// Attaches an internet gateway to a VPC, enabling connectivity between the +// internet and the VPC. For more information about your VPC and internet gateway, // see the Amazon Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -1235,7 +1239,7 @@ const opAttachNetworkInterface = "AttachNetworkInterface" // AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the AttachNetworkInterface operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1309,7 +1313,7 @@ const opAttachVolume = "AttachVolume" // AttachVolumeRequest generates a "aws/request.Request" representing the // client's request for the AttachVolume operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1374,8 +1378,6 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // the product. For example, you can't detach a volume from a Windows instance // and attach it to a Linux instance. // -// For an overview of the AWS Marketplace, see Introducing AWS Marketplace (https://aws.amazon.com/marketplace/help/200900000). -// // For more information about EBS volumes, see Attaching Amazon EBS Volumes // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -1412,7 +1414,7 @@ const opAttachVpnGateway = "AttachVpnGateway" // AttachVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the AttachVpnGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1490,7 +1492,7 @@ const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" // AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the // client's request for the AuthorizeSecurityGroupEgress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1583,7 +1585,7 @@ const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" // AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the // client's request for the AuthorizeSecurityGroupIngress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1677,7 +1679,7 @@ const opBundleInstance = "BundleInstance" // BundleInstanceRequest generates a "aws/request.Request" representing the // client's request for the BundleInstance operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. @@ -1725,8 +1727,6 @@ func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Re // This action is not applicable for Linux/Unix instances or Windows instances // that are backed by Amazon EBS. // -// For more information, see Creating an Instance Store-Backed Windows AMI (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_InstanceStoreBacked_WinAMI.html). -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1759,7 +1759,7 @@ const opCancelBundleTask = "CancelBundleTask" // CancelBundleTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelBundleTask operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1833,7 +1833,7 @@ const opCancelConversionTask = "CancelConversionTask" // CancelConversionTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelConversionTask operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1916,7 +1916,7 @@ const opCancelExportTask = "CancelExportTask" // CancelExportTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelExportTask operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1995,7 +1995,7 @@ const opCancelImportTask = "CancelImportTask" // CancelImportTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelImportTask operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2069,7 +2069,7 @@ const opCancelReservedInstancesListing = "CancelReservedInstancesListing" // CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the // client's request for the CancelReservedInstancesListing operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2147,7 +2147,7 @@ const opCancelSpotFleetRequests = "CancelSpotFleetRequests" // CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the // client's request for the CancelSpotFleetRequests operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2228,7 +2228,7 @@ const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" // CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the // client's request for the CancelSpotInstanceRequests operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2268,11 +2268,7 @@ func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequest // CancelSpotInstanceRequests API operation for Amazon Elastic Compute Cloud. // -// Cancels one or more Spot Instance requests. Spot Instances are instances -// that Amazon EC2 starts on your behalf when the maximum price that you specify -// exceeds the current Spot price. For more information, see Spot Instance Requests -// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) in -// the Amazon Elastic Compute Cloud User Guide. +// Cancels one or more Spot Instance requests. // // Canceling a Spot Instance request does not terminate running Spot Instances // associated with the request. @@ -2309,7 +2305,7 @@ const opConfirmProductInstance = "ConfirmProductInstance" // ConfirmProductInstanceRequest generates a "aws/request.Request" representing the // client's request for the ConfirmProductInstance operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2385,7 +2381,7 @@ const opCopyFpgaImage = "CopyFpgaImage" // CopyFpgaImageRequest generates a "aws/request.Request" representing the // client's request for the CopyFpgaImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2459,7 +2455,7 @@ const opCopyImage = "CopyImage" // CopyImageRequest generates a "aws/request.Request" representing the // client's request for the CopyImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2539,7 +2535,7 @@ const opCopySnapshot = "CopySnapshot" // CopySnapshotRequest generates a "aws/request.Request" representing the // client's request for the CopySnapshot operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -2594,7 +2590,7 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // To copy an encrypted snapshot that has been shared from another account, // you must have permissions for the CMK used to encrypt the snapshot. // -// Snapshots created by the CopySnapshot action have an arbitrary volume ID +// Snapshots created by copying another snapshot have an arbitrary volume ID // that should not be used for any purpose. // // For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) @@ -2632,7 +2628,7 @@ const opCreateCustomerGateway = "CreateCustomerGateway" // CreateCustomerGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateCustomerGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2730,7 +2726,7 @@ const opCreateDefaultSubnet = "CreateDefaultSubnet" // CreateDefaultSubnetRequest generates a "aws/request.Request" representing the // client's request for the CreateDefaultSubnet operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2808,7 +2804,7 @@ const opCreateDefaultVpc = "CreateDefaultVpc" // CreateDefaultVpcRequest generates a "aws/request.Request" representing the // client's request for the CreateDefaultVpc operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2854,12 +2850,12 @@ func (c *EC2) CreateDefaultVpcRequest(input *CreateDefaultVpcInput) (req *reques // in the Amazon Virtual Private Cloud User Guide. You cannot specify the components // of the default VPC yourself. // -// You can create a default VPC if you deleted your previous default VPC. You -// cannot have more than one default VPC per region. +// If you deleted your previous default VPC, you can create a default VPC. +// You cannot have more than one default VPC per Region. // // If your account supports EC2-Classic, you cannot use this action to create -// a default VPC in a region that supports EC2-Classic. If you want a default -// VPC in a region that supports EC2-Classic, see "I really want a default VPC +// a default VPC in a Region that supports EC2-Classic. If you want a default +// VPC in a Region that supports EC2-Classic, see "I really want a default VPC // for my existing EC2 account. Is that possible?" in the Default VPCs FAQ (http://aws.amazon.com/vpc/faqs/#Default_VPCs). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2894,7 +2890,7 @@ const opCreateDhcpOptions = "CreateDhcpOptions" // CreateDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the CreateDhcpOptions operation.
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2943,9 +2939,9 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // * domain-name-servers - The IP addresses of up to four domain name servers, // or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. // If specifying more than one domain name server, specify the IP addresses -// in a single parameter, separated by commas. If you want your instance -// to receive a custom DNS hostname as specified in domain-name, you must -// set domain-name-servers to a custom DNS server. +// in a single parameter, separated by commas. ITo have your instance to +// receive a custom DNS hostname as specified in domain-name, you must set +// domain-name-servers to a custom DNS server. // // * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify // ec2.internal. If you're using AmazonProvidedDNS in another region, specify @@ -2969,10 +2965,9 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // // Your VPC automatically starts out with a set of DHCP options that includes // only a DNS server that we provide (AmazonProvidedDNS). If you create a set -// of options, and if your VPC has an Internet gateway, make sure to set the +// of options, and if your VPC has an internet gateway, make sure to set the // domain-name-servers option either to AmazonProvidedDNS or to a domain name -// server of your choice. For more information about DHCP options, see DHCP -// Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// server of your choice. For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3007,7 +3002,7 @@ const opCreateEgressOnlyInternetGateway = "CreateEgressOnlyInternetGateway" // CreateEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateEgressOnlyInternetGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3047,9 +3042,9 @@ func (c *EC2) CreateEgressOnlyInternetGatewayRequest(input *CreateEgressOnlyInte // CreateEgressOnlyInternetGateway API operation for Amazon Elastic Compute Cloud. // -// [IPv6 only] Creates an egress-only Internet gateway for your VPC. An egress-only -// Internet gateway is used to enable outbound communication over IPv6 from -// instances in your VPC to the Internet, and prevents hosts outside of your +// [IPv6 only] Creates an egress-only internet gateway for your VPC. An egress-only +// internet gateway is used to enable outbound communication over IPv6 from +// instances in your VPC to the internet, and prevents hosts outside of your // VPC from initiating an IPv6 connection with your instance. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -3080,11 +3075,91 @@ func (c *EC2) CreateEgressOnlyInternetGatewayWithContext(ctx aws.Context, input return out, req.Send() } +const opCreateFleet = "CreateFleet" + +// CreateFleetRequest generates a "aws/request.Request" representing the +// client's request for the CreateFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFleet for more information on using the CreateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFleetRequest method. +// req, resp := client.CreateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFleet +func (c *EC2) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, output *CreateFleetOutput) { + op := &request.Operation{ + Name: opCreateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFleetInput{} + } + + output = &CreateFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFleet API operation for Amazon Elastic Compute Cloud. +// +// Launches an EC2 Fleet. +// +// You can create a single EC2 Fleet that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. +// +// For more information, see Launching an EC2 Fleet (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateFleet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFleet +func (c *EC2) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + return out, req.Send() +} + +// CreateFleetWithContext is the same as CreateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateFlowLogs = "CreateFlowLogs" // CreateFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the CreateFlowLogs operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3124,16 +3199,22 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re // CreateFlowLogs API operation for Amazon Elastic Compute Cloud. // -// Creates one or more flow logs to capture IP traffic for a specific network -// interface, subnet, or VPC. Flow logs are delivered to a specified log group -// in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, -// a log stream is created in CloudWatch Logs for each network interface in -// the subnet or VPC. Log streams can include information about accepted and -// rejected traffic to a network interface. You can view the data in your log -// streams using Amazon CloudWatch Logs. +// Creates one or more flow logs to capture information about IP traffic for +// a specific network interface, subnet, or VPC. +// +// Flow log data for a monitored network interface is recorded as flow log records, +// which are log events consisting of fields that describe the traffic flow. +// For more information, see Flow Log Records (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html#flow-log-records) +// in the Amazon Virtual Private Cloud User Guide. +// +// When publishing to CloudWatch Logs, flow log records are published to a log +// group, and each network interface has a unique log stream in the log group. +// When publishing to Amazon S3, flow log records for all of the monitored network +// interfaces are published to a single log file object that is stored in the +// specified bucket. // -// In your request, you must also specify an IAM role that has permission to -// publish logs to CloudWatch Logs. +// For more information, see VPC Flow Logs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html) +// in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3167,7 +3248,7 @@ const opCreateFpgaImage = "CreateFpgaImage" // CreateFpgaImageRequest generates a "aws/request.Request" representing the // client's request for the CreateFpgaImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3248,7 +3329,7 @@ const opCreateImage = "CreateImage" // CreateImageRequest generates a "aws/request.Request" representing the // client's request for the CreateImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3331,7 +3412,7 @@ const opCreateInstanceExportTask = "CreateInstanceExportTask" // CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the // client's request for the CreateInstanceExportTask operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3410,7 +3491,7 @@ const opCreateInternetGateway = "CreateInternetGateway" // CreateInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateInternetGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3450,10 +3531,10 @@ func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (r // CreateInternetGateway API operation for Amazon Elastic Compute Cloud. // -// Creates an Internet gateway for use with a VPC. After creating the Internet +// Creates an internet gateway for use with a VPC. After creating the internet // gateway, you attach it to a VPC using AttachInternetGateway. // -// For more information about your VPC and Internet gateway, see the Amazon +// For more information about your VPC and internet gateway, see the Amazon // Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3488,7 +3569,7 @@ const opCreateKeyPair = "CreateKeyPair" // CreateKeyPairRequest generates a "aws/request.Request" representing the // client's request for the CreateKeyPair operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3574,7 +3655,7 @@ const opCreateLaunchTemplate = "CreateLaunchTemplate" // CreateLaunchTemplateRequest generates a "aws/request.Request" representing the // client's request for the CreateLaunchTemplate operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3650,7 +3731,7 @@ const opCreateLaunchTemplateVersion = "CreateLaunchTemplateVersion" // CreateLaunchTemplateVersionRequest generates a "aws/request.Request" representing the // client's request for the CreateLaunchTemplateVersion operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3728,7 +3809,7 @@ const opCreateNatGateway = "CreateNatGateway" // CreateNatGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateNatGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -3768,11 +3849,12 @@ func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *reques // CreateNatGateway API operation for Amazon Elastic Compute Cloud. // -// Creates a NAT gateway in the specified subnet. A NAT gateway can be used -// to enable instances in a private subnet to connect to the Internet. This -// action creates a network interface in the specified subnet with a private -// IP address from the IP address range of the subnet. For more information, -// see NAT Gateways (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html) +// Creates a NAT gateway in the specified public subnet. This action creates +// a network interface in the specified subnet with a private IP address from +// the IP address range of the subnet. Internet-bound traffic from a private +// subnet can be routed to the NAT gateway, therefore enabling instances in +// the private subnet to connect to the internet. For more information, see +// NAT Gateways (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3807,7 +3889,7 @@ const opCreateNetworkAcl = "CreateNetworkAcl" // CreateNetworkAclRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkAcl operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3850,7 +3932,7 @@ func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *reques // Creates a network ACL in a VPC. Network ACLs provide an optional layer of // security (in addition to security groups) for the instances in your VPC. // -// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3885,7 +3967,7 @@ const opCreateNetworkAclEntry = "CreateNetworkAclEntry" // CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkAclEntry operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3977,7 +4059,7 @@ const opCreateNetworkInterface = "CreateNetworkInterface" // CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkInterface operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -4055,7 +4137,7 @@ const opCreateNetworkInterfacePermission = "CreateNetworkInterfacePermission" // CreateNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkInterfacePermission operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4095,8 +4177,8 @@ func (c *EC2) CreateNetworkInterfacePermissionRequest(input *CreateNetworkInterf // CreateNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. // -// Grants an AWS authorized partner account permission to attach the specified -// network interface to an instance in their account. +// Grants an AWS-authorized account permission to attach the specified network +// interface to an instance in their account. // // You can grant permission to a single AWS account only, and only one account // at a time. @@ -4133,7 +4215,7 @@ const opCreatePlacementGroup = "CreatePlacementGroup" // CreatePlacementGroupRequest generates a "aws/request.Request" representing the // client's request for the CreatePlacementGroup operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4217,7 +4299,7 @@ const opCreateReservedInstancesListing = "CreateReservedInstancesListing" // CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the // client's request for the CreateReservedInstancesListing operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4314,7 +4396,7 @@ const opCreateRoute = "CreateRoute" // CreateRouteRequest generates a "aws/request.Request" representing the // client's request for the CreateRoute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4356,9 +4438,9 @@ func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, // // Creates a route in a route table within a VPC. // -// You must specify one of the following targets: Internet gateway or virtual +// You must specify one of the following targets: internet gateway or virtual // private gateway, NAT instance, NAT gateway, VPC peering connection, network -// interface, or egress-only Internet gateway. +// interface, or egress-only internet gateway. // // When determining how to route traffic, we use the route with the most specific // match. For example, traffic is destined for the IPv4 address 192.0.2.3, and @@ -4407,7 +4489,7 @@ const opCreateRouteTable = "CreateRouteTable" // CreateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the CreateRouteTable operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4450,7 +4532,7 @@ func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *reques // Creates a route table for the specified VPC. After you create a route table, // you can add routes and associate the table with a subnet. // -// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4485,7 +4567,7 @@ const opCreateSecurityGroup = "CreateSecurityGroup" // CreateSecurityGroupRequest generates a "aws/request.Request" representing the // client's request for the CreateSecurityGroup operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4585,7 +4667,7 @@ const opCreateSnapshot = "CreateSnapshot" // CreateSnapshotRequest generates a "aws/request.Request" representing the // client's request for the CreateSnapshot operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4650,6 +4732,10 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re // encrypted. Your encrypted volumes and any associated snapshots always remain // protected. // +// You can tag your snapshots during creation. For more information, see Tagging +// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +// // For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) // and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -4686,7 +4772,7 @@ const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" // CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the CreateSpotDatafeedSubscription operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4729,7 +4815,7 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // Creates a data feed for Spot Instances, enabling you to view Spot Instance // usage logs. You can create one data feed per AWS account. 
For more information, // see Spot Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4763,7 +4849,7 @@ const opCreateSubnet = "CreateSubnet" // CreateSubnetRequest generates a "aws/request.Request" representing the // client's request for the CreateSubnet operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4805,13 +4891,13 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // // Creates a subnet in an existing VPC. // -// When you create each subnet, you provide the VPC ID and the IPv4 CIDR block -// you want for the subnet. After you create a subnet, you can't change its -// CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a -// VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create -// more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. -// The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 -// IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). +// When you create each subnet, you provide the VPC ID and IPv4 CIDR block for +// the subnet. After you create a subnet, you can't change its CIDR block. The +// size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR +// block, or a subset of a VPC's IPv4 CIDR block. If you create more than one +// subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest +// IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), +// and the largest uses a /16 netmask (65,536 IPv4 addresses). // // If you've associated an IPv6 CIDR block with your VPC, you can create a subnet // with an IPv6 CIDR block that uses a /64 prefix length. @@ -4863,7 +4949,7 @@ const opCreateTags = "CreateTags" // CreateTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateTags operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4947,7 +5033,7 @@ const opCreateVolume = "CreateVolume" // CreateVolumeRequest generates a "aws/request.Request" representing the // client's request for the CreateVolume operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5002,7 +5088,8 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // in the Amazon Elastic Compute Cloud User Guide. // // You can tag your volumes during creation. For more information, see Tagging -// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). 
+// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. // // For more information, see Creating an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -5039,7 +5126,7 @@ const opCreateVpc = "CreateVpc" // CreateVpcRequest generates a "aws/request.Request" representing the // client's request for the CreateVpc operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5081,8 +5168,8 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // // Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can // create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 -// netmask (65,536 IPv4 addresses). To help you decide how big to make your -// VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// netmask (65,536 IPv4 addresses). For more information about how large to +// make your VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) // in the Amazon Virtual Private Cloud User Guide. // // You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. @@ -5090,8 +5177,8 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC. // // By default, each instance you launch in the VPC has the default DHCP options, -// which includes only a default DNS server that we provide (AmazonProvidedDNS). -// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// which include only a default DNS server that we provide (AmazonProvidedDNS). +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // You can specify the instance tenancy value for the VPC when you create it. @@ -5131,7 +5218,7 @@ const opCreateVpcEndpoint = "CreateVpcEndpoint" // CreateVpcEndpointRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5221,7 +5308,7 @@ const opCreateVpcEndpointConnectionNotification = "CreateVpcEndpointConnectionNo // CreateVpcEndpointConnectionNotificationRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpointConnectionNotification operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
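// Minimal usage sketch of tagging a volume at creation time, as the CreateVolume
// doc comment above describes. The Availability Zone, size, and tag values are
// placeholders, and the TagSpecifications field is assumed from the vendored
// SDK rather than shown in these hunks.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.CreateVolume(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-west-2a"), // placeholder
		Size:             aws.Int64(8),             // GiB
		// Apply tags during creation instead of calling CreateTags afterwards.
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String("volume"),
			Tags: []*ec2.Tag{{
				Key:   aws.String("Name"),
				Value: aws.String("example"), // placeholder
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}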
@@ -5301,7 +5388,7 @@ const opCreateVpcEndpointServiceConfiguration = "CreateVpcEndpointServiceConfigu // CreateVpcEndpointServiceConfigurationRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpointServiceConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5382,7 +5469,7 @@ const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" // CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcPeeringConnection operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5424,10 +5511,14 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio // // Requests a VPC peering connection between two VPCs: a requester VPC that // you own and an accepter VPC with which to create the connection. The accepter -// VPC can belong to another AWS account and can be in a different region to +// VPC can belong to another AWS account and can be in a different Region to // the requester VPC. The requester VPC and accepter VPC cannot have overlapping // CIDR blocks. // +// Limitations and rules apply to a VPC peering connection. For more information, +// see the limitations (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/vpc-peering-basics.html#vpc-peering-limitations) +// section in the VPC Peering Guide. +// // The owner of the accepter VPC must accept the peering request to activate // the peering connection. The VPC peering connection request expires after // 7 days, after which it cannot be accepted or rejected. @@ -5467,7 +5558,7 @@ const opCreateVpnConnection = "CreateVpnConnection" // CreateVpnConnectionRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnConnection operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5559,7 +5650,7 @@ const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" // CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnConnectionRoute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5642,7 +5733,7 @@ const opCreateVpnGateway = "CreateVpnGateway" // CreateVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. @@ -5722,7 +5813,7 @@ const opDeleteCustomerGateway = "DeleteCustomerGateway" // DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteCustomerGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5799,7 +5890,7 @@ const opDeleteDhcpOptions = "DeleteDhcpOptions" // DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the DeleteDhcpOptions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5878,7 +5969,7 @@ const opDeleteEgressOnlyInternetGateway = "DeleteEgressOnlyInternetGateway" // DeleteEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteEgressOnlyInternetGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5918,7 +6009,7 @@ func (c *EC2) DeleteEgressOnlyInternetGatewayRequest(input *DeleteEgressOnlyInte // DeleteEgressOnlyInternetGateway API operation for Amazon Elastic Compute Cloud. // -// Deletes an egress-only Internet gateway. +// Deletes an egress-only internet gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5948,11 +6039,91 @@ func (c *EC2) DeleteEgressOnlyInternetGatewayWithContext(ctx aws.Context, input return out, req.Send() } +const opDeleteFleets = "DeleteFleets" + +// DeleteFleetsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFleets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFleets for more information on using the DeleteFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFleetsRequest method. 
+// req, resp := client.DeleteFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFleets +func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Request, output *DeleteFleetsOutput) { + op := &request.Operation{ + Name: opDeleteFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFleetsInput{} + } + + output = &DeleteFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFleets API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified EC2 Fleet. +// +// After you delete an EC2 Fleet, it launches no new instances. You must specify +// whether an EC2 Fleet should also terminate its instances. If you terminate +// the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, +// the EC2 Fleet enters the deleted_running state, and the instances continue +// to run until they are interrupted or you terminate them manually. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFleets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFleets +func (c *EC2) DeleteFleets(input *DeleteFleetsInput) (*DeleteFleetsOutput, error) { + req, out := c.DeleteFleetsRequest(input) + return out, req.Send() +} + +// DeleteFleetsWithContext is the same as DeleteFleets with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFleetsWithContext(ctx aws.Context, input *DeleteFleetsInput, opts ...request.Option) (*DeleteFleetsOutput, error) { + req, out := c.DeleteFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteFlowLogs = "DeleteFlowLogs" // DeleteFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the DeleteFlowLogs operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6026,7 +6197,7 @@ const opDeleteFpgaImage = "DeleteFpgaImage" // DeleteFpgaImageRequest generates a "aws/request.Request" representing the // client's request for the DeleteFpgaImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
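// Minimal usage sketch of the DeleteFleets operation added above: you must say
// whether the fleet's instances should be terminated, which determines whether
// the fleet enters deleted_terminating or deleted_running. The fleet ID is a
// placeholder and the input/output field names are assumed from the vendored SDK.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DeleteFleets(&ec2.DeleteFleetsInput{
		FleetIds:           aws.StringSlice([]string{"fleet-0123456789abcdef0"}), // placeholder
		TerminateInstances: aws.Bool(true), // fleet should enter deleted_terminating
	})
	if err != nil {
		log.Fatal(err)
	}
	// Print the state each successfully deleted fleet moved into.
	for _, f := range out.SuccessfulFleetDeletions {
		fmt.Println(aws.StringValue(f.FleetId), aws.StringValue(f.CurrentFleetState))
	}
}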
@@ -6100,7 +6271,7 @@ const opDeleteInternetGateway = "DeleteInternetGateway" // DeleteInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteInternetGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6142,7 +6313,7 @@ func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (r // DeleteInternetGateway API operation for Amazon Elastic Compute Cloud. // -// Deletes the specified Internet gateway. You must detach the Internet gateway +// Deletes the specified internet gateway. You must detach the internet gateway // from the VPC before you can delete it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6177,7 +6348,7 @@ const opDeleteKeyPair = "DeleteKeyPair" // DeleteKeyPairRequest generates a "aws/request.Request" representing the // client's request for the DeleteKeyPair operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6253,7 +6424,7 @@ const opDeleteLaunchTemplate = "DeleteLaunchTemplate" // DeleteLaunchTemplateRequest generates a "aws/request.Request" representing the // client's request for the DeleteLaunchTemplate operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6328,7 +6499,7 @@ const opDeleteLaunchTemplateVersions = "DeleteLaunchTemplateVersions" // DeleteLaunchTemplateVersionsRequest generates a "aws/request.Request" representing the // client's request for the DeleteLaunchTemplateVersions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6405,7 +6576,7 @@ const opDeleteNatGateway = "DeleteNatGateway" // DeleteNatGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteNatGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6481,7 +6652,7 @@ const opDeleteNetworkAcl = "DeleteNetworkAcl" // DeleteNetworkAclRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkAcl operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -6558,7 +6729,7 @@ const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" // DeleteNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkAclEntry operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6635,7 +6806,7 @@ const opDeleteNetworkInterface = "DeleteNetworkInterface" // DeleteNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkInterface operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6712,7 +6883,7 @@ const opDeleteNetworkInterfacePermission = "DeleteNetworkInterfacePermission" // DeleteNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkInterfacePermission operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6789,7 +6960,7 @@ const opDeletePlacementGroup = "DeletePlacementGroup" // DeletePlacementGroupRequest generates a "aws/request.Request" representing the // client's request for the DeletePlacementGroup operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6868,7 +7039,7 @@ const opDeleteRoute = "DeleteRoute" // DeleteRouteRequest generates a "aws/request.Request" representing the // client's request for the DeleteRoute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6944,7 +7115,7 @@ const opDeleteRouteTable = "DeleteRouteTable" // DeleteRouteTableRequest generates a "aws/request.Request" representing the // client's request for the DeleteRouteTable operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7022,7 +7193,7 @@ const opDeleteSecurityGroup = "DeleteSecurityGroup" // DeleteSecurityGroupRequest generates a "aws/request.Request" representing the // client's request for the DeleteSecurityGroup operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -7102,7 +7273,7 @@ const opDeleteSnapshot = "DeleteSnapshot" // DeleteSnapshotRequest generates a "aws/request.Request" representing the // client's request for the DeleteSnapshot operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7192,7 +7363,7 @@ const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" // DeleteSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the DeleteSpotDatafeedSubscription operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7268,7 +7439,7 @@ const opDeleteSubnet = "DeleteSubnet" // DeleteSubnetRequest generates a "aws/request.Request" representing the // client's request for the DeleteSubnet operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7345,7 +7516,7 @@ const opDeleteTags = "DeleteTags" // DeleteTagsRequest generates a "aws/request.Request" representing the // client's request for the DeleteTags operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7425,7 +7596,7 @@ const opDeleteVolume = "DeleteVolume" // DeleteVolumeRequest generates a "aws/request.Request" representing the // client's request for the DeleteVolume operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7470,7 +7641,7 @@ func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Reques // Deletes the specified EBS volume. The volume must be in the available state // (not attached to an instance). // -// The volume may remain in the deleting state for several minutes. +// The volume can remain in the deleting state for several minutes. // // For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -7507,7 +7678,7 @@ const opDeleteVpc = "DeleteVpc" // DeleteVpcRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpc operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
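The DeleteVolume wording above ("the volume can remain in the deleting state for several minutes") is the sort of detail callers usually absorb with a waiter rather than polling by hand. A rough sketch under the assumption that a generated WaitUntilVolumeDeleted waiter and the VolumeId/VolumeIds input fields exist as named here; the volume ID is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	volID := "vol-0123456789abcdef0" // placeholder; the volume must be in the available state

	if _, err := svc.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: aws.String(volID)}); err != nil {
		log.Fatal(err)
	}

	// The volume can stay in the deleting state for several minutes;
	// block until the waiter no longer finds it.
	if err := svc.WaitUntilVolumeDeleted(&ec2.DescribeVolumesInput{
		VolumeIds: aws.StringSlice([]string{volID}),
	}); err != nil {
		log.Fatal(err)
	}
	log.Printf("volume %s deleted", volID)
}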
@@ -7587,7 +7758,7 @@ const opDeleteVpcEndpointConnectionNotifications = "DeleteVpcEndpointConnectionN // DeleteVpcEndpointConnectionNotificationsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpointConnectionNotifications operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7661,7 +7832,7 @@ const opDeleteVpcEndpointServiceConfigurations = "DeleteVpcEndpointServiceConfig // DeleteVpcEndpointServiceConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpointServiceConfigurations operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7738,7 +7909,7 @@ const opDeleteVpcEndpoints = "DeleteVpcEndpoints" // DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpoints operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7815,7 +7986,7 @@ const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" // DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcPeeringConnection operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -7858,7 +8029,8 @@ func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectio // Deletes a VPC peering connection. Either the owner of the requester VPC or // the owner of the accepter VPC can delete the VPC peering connection if it's // in the active state. The owner of the requester VPC can delete a VPC peering -// connection in the pending-acceptance state. +// connection in the pending-acceptance state. You cannot delete a VPC peering +// connection that's in the failed state. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7892,7 +8064,7 @@ const opDeleteVpnConnection = "DeleteVpnConnection" // DeleteVpnConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnConnection operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
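Every operation above notes that it "returns awserr.Error for service API and SDK errors" and points at runtime type assertions on Code and Message. A small sketch of that error-handling pattern around DeleteVpcPeeringConnection, assuming a configured client; the VpcPeeringConnectionId field name and the pcx- ID are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DeleteVpcPeeringConnection(&ec2.DeleteVpcPeeringConnectionInput{
		VpcPeeringConnectionId: aws.String("pcx-0123456789abcdef0"), // placeholder ID
	})
	if err != nil {
		// Service API and SDK errors implement awserr.Error, which carries
		// a machine-readable code alongside the human-readable message.
		if aerr, ok := err.(awserr.Error); ok {
			log.Fatalf("DeleteVpcPeeringConnection failed: code=%s message=%s", aerr.Code(), aerr.Message())
		}
		log.Fatal(err)
	}
	fmt.Println(out)
}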
@@ -7977,7 +8149,7 @@ const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" // DeleteVpnConnectionRouteRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnConnectionRoute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8056,7 +8228,7 @@ const opDeleteVpnGateway = "DeleteVpnGateway" // DeleteVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8136,7 +8308,7 @@ const opDeregisterImage = "DeregisterImage" // DeregisterImageRequest generates a "aws/request.Request" representing the // client's request for the DeregisterImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8220,7 +8392,7 @@ const opDescribeAccountAttributes = "DescribeAccountAttributes" // DescribeAccountAttributesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAccountAttributes operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8268,7 +8440,7 @@ func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI // // * default-vpc: The ID of the default VPC for your account, or none. // -// * max-instances: The maximum number of On-Demand instances that you can +// * max-instances: The maximum number of On-Demand Instances that you can // run. // // * vpc-max-security-groups-per-interface: The maximum number of security @@ -8312,7 +8484,7 @@ const opDescribeAddresses = "DescribeAddresses" // DescribeAddressesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAddresses operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8386,11 +8558,99 @@ func (c *EC2) DescribeAddressesWithContext(ctx aws.Context, input *DescribeAddre return out, req.Send() } +const opDescribeAggregateIdFormat = "DescribeAggregateIdFormat" + +// DescribeAggregateIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAggregateIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAggregateIdFormat for more information on using the DescribeAggregateIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAggregateIdFormatRequest method. +// req, resp := client.DescribeAggregateIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAggregateIdFormat +func (c *EC2) DescribeAggregateIdFormatRequest(input *DescribeAggregateIdFormatInput) (req *request.Request, output *DescribeAggregateIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeAggregateIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAggregateIdFormatInput{} + } + + output = &DescribeAggregateIdFormatOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAggregateIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Describes the longer ID format settings for all resource types in a specific +// region. This request is useful for performing a quick audit to determine +// whether a specific region is fully opted in for longer IDs (17-character +// IDs). +// +// This request only returns information about resource types that support longer +// IDs. +// +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAggregateIdFormat for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAggregateIdFormat +func (c *EC2) DescribeAggregateIdFormat(input *DescribeAggregateIdFormatInput) (*DescribeAggregateIdFormatOutput, error) { + req, out := c.DescribeAggregateIdFormatRequest(input) + return out, req.Send() +} + +// DescribeAggregateIdFormatWithContext is the same as DescribeAggregateIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAggregateIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeAggregateIdFormatWithContext(ctx aws.Context, input *DescribeAggregateIdFormatInput, opts ...request.Option) (*DescribeAggregateIdFormatOutput, error) { + req, out := c.DescribeAggregateIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAvailabilityZones = "DescribeAvailabilityZones" // DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAvailabilityZones operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8470,7 +8730,7 @@ const opDescribeBundleTasks = "DescribeBundleTasks" // DescribeBundleTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeBundleTasks operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8549,7 +8809,7 @@ const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" // DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeClassicLinkInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8591,7 +8851,7 @@ func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInst // // Describes one or more of your linked EC2-Classic instances. This request // only returns information about EC2-Classic instances linked to a VPC through -// ClassicLink; you cannot use this request to return information about other +// ClassicLink. You cannot use this request to return information about other // instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8626,7 +8886,7 @@ const opDescribeConversionTasks = "DescribeConversionTasks" // DescribeConversionTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeConversionTasks operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8704,7 +8964,7 @@ const opDescribeCustomerGateways = "DescribeCustomerGateways" // DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeCustomerGateways operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
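DescribeAggregateIdFormat, added above, is described as a quick audit of whether a region is fully opted in to 17-character IDs. It takes no required parameters; a minimal sketch, assuming a session configured for the region you want to audit, that prints the response in the same style as the generated examples.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// The audit applies to the region the session is configured for.
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DescribeAggregateIdFormat(&ec2.DescribeAggregateIdFormatInput{})
	if err != nil {
		log.Fatal(err)
	}
	// Per-resource-type statuses are in the output; printed as-is here
	// rather than assuming particular field names.
	fmt.Println(out)
}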
@@ -8782,7 +9042,7 @@ const opDescribeDhcpOptions = "DescribeDhcpOptions" // DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeDhcpOptions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8824,7 +9084,7 @@ func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req * // // Describes one or more of your DHCP options sets. // -// For more information about DHCP options sets, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8859,7 +9119,7 @@ const opDescribeEgressOnlyInternetGateways = "DescribeEgressOnlyInternetGateways // DescribeEgressOnlyInternetGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeEgressOnlyInternetGateways operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -8899,7 +9159,7 @@ func (c *EC2) DescribeEgressOnlyInternetGatewaysRequest(input *DescribeEgressOnl // DescribeEgressOnlyInternetGateways API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your egress-only Internet gateways. +// Describes one or more of your egress-only internet gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8933,7 +9193,7 @@ const opDescribeElasticGpus = "DescribeElasticGpus" // DescribeElasticGpusRequest generates a "aws/request.Request" representing the // client's request for the DescribeElasticGpus operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9008,7 +9268,7 @@ const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeExportTasks operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9078,11 +9338,233 @@ func (c *EC2) DescribeExportTasksWithContext(ctx aws.Context, input *DescribeExp return out, req.Send() } +const opDescribeFleetHistory = "DescribeFleetHistory" + +// DescribeFleetHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetHistory operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetHistory for more information on using the DescribeFleetHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetHistoryRequest method. +// req, resp := client.DescribeFleetHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetHistory +func (c *EC2) DescribeFleetHistoryRequest(input *DescribeFleetHistoryInput) (req *request.Request, output *DescribeFleetHistoryOutput) { + op := &request.Operation{ + Name: opDescribeFleetHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetHistoryInput{} + } + + output = &DescribeFleetHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetHistory API operation for Amazon Elastic Compute Cloud. +// +// Describes the events for the specified EC2 Fleet during the specified time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFleetHistory for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetHistory +func (c *EC2) DescribeFleetHistory(input *DescribeFleetHistoryInput) (*DescribeFleetHistoryOutput, error) { + req, out := c.DescribeFleetHistoryRequest(input) + return out, req.Send() +} + +// DescribeFleetHistoryWithContext is the same as DescribeFleetHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFleetHistoryWithContext(ctx aws.Context, input *DescribeFleetHistoryInput, opts ...request.Option) (*DescribeFleetHistoryOutput, error) { + req, out := c.DescribeFleetHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleetInstances = "DescribeFleetInstances" + +// DescribeFleetInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetInstances for more information on using the DescribeFleetInstances +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetInstancesRequest method. +// req, resp := client.DescribeFleetInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetInstances +func (c *EC2) DescribeFleetInstancesRequest(input *DescribeFleetInstancesInput) (req *request.Request, output *DescribeFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetInstancesInput{} + } + + output = &DescribeFleetInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes the running instances for the specified EC2 Fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFleetInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetInstances +func (c *EC2) DescribeFleetInstances(input *DescribeFleetInstancesInput) (*DescribeFleetInstancesOutput, error) { + req, out := c.DescribeFleetInstancesRequest(input) + return out, req.Send() +} + +// DescribeFleetInstancesWithContext is the same as DescribeFleetInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFleetInstancesWithContext(ctx aws.Context, input *DescribeFleetInstancesInput, opts ...request.Option) (*DescribeFleetInstancesOutput, error) { + req, out := c.DescribeFleetInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleets = "DescribeFleets" + +// DescribeFleetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleets for more information on using the DescribeFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetsRequest method. 
+// req, resp := client.DescribeFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleets +func (c *EC2) DescribeFleetsRequest(input *DescribeFleetsInput) (req *request.Request, output *DescribeFleetsOutput) { + op := &request.Operation{ + Name: opDescribeFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetsInput{} + } + + output = &DescribeFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleets API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your EC2 Fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFleets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleets +func (c *EC2) DescribeFleets(input *DescribeFleetsInput) (*DescribeFleetsOutput, error) { + req, out := c.DescribeFleetsRequest(input) + return out, req.Send() +} + +// DescribeFleetsWithContext is the same as DescribeFleets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFleetsWithContext(ctx aws.Context, input *DescribeFleetsInput, opts ...request.Option) (*DescribeFleetsOutput, error) { + req, out := c.DescribeFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeFlowLogs = "DescribeFlowLogs" // DescribeFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the DescribeFlowLogs operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9158,7 +9640,7 @@ const opDescribeFpgaImageAttribute = "DescribeFpgaImageAttribute" // DescribeFpgaImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeFpgaImageAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9232,7 +9714,7 @@ const opDescribeFpgaImages = "DescribeFpgaImages" // DescribeFpgaImagesRequest generates a "aws/request.Request" representing the // client's request for the DescribeFpgaImages operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
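The *WithContext variants added above (DescribeFleets, DescribeFleetHistory, DescribeFleetInstances) require a non-nil context and use it for request cancellation. A sketch that bounds DescribeFleets with a timeout, assuming a configured client; aws.Context is satisfied by a standard context.Context.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// The context must be non-nil; cancelling it aborts the in-flight request.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.DescribeFleetsWithContext(ctx, &ec2.DescribeFleetsInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}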
@@ -9308,7 +9790,7 @@ const opDescribeHostReservationOfferings = "DescribeHostReservationOfferings" // DescribeHostReservationOfferingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHostReservationOfferings operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9348,14 +9830,14 @@ func (c *EC2) DescribeHostReservationOfferingsRequest(input *DescribeHostReserva // DescribeHostReservationOfferings API operation for Amazon Elastic Compute Cloud. // -// Describes the Dedicated Host Reservations that are available to purchase. +// Describes the Dedicated Host reservations that are available to purchase. // -// The results describe all the Dedicated Host Reservation offerings, including +// The results describe all the Dedicated Host reservation offerings, including // offerings that may not match the instance family and region of your Dedicated -// Hosts. When purchasing an offering, ensure that the the instance family and -// region of the offering matches that of the Dedicated Host/s it will be associated -// with. For an overview of supported instance types, see Dedicated Hosts Overview -// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html) +// Hosts. When purchasing an offering, ensure that the instance family and Region +// of the offering matches that of the Dedicated Hosts with which it is to be +// associated . For more information about supported instance types, see Dedicated +// Hosts Overview (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -9390,7 +9872,7 @@ const opDescribeHostReservations = "DescribeHostReservations" // DescribeHostReservationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHostReservations operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9430,8 +9912,7 @@ func (c *EC2) DescribeHostReservationsRequest(input *DescribeHostReservationsInp // DescribeHostReservations API operation for Amazon Elastic Compute Cloud. // -// Describes Dedicated Host Reservations which are associated with Dedicated -// Hosts in your account. +// Describes reservations that are associated with Dedicated Hosts in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9465,7 +9946,7 @@ const opDescribeHosts = "DescribeHosts" // DescribeHostsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHosts operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
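The boilerplate repeated above describes the two-step pattern: build the *request.Request, optionally "inject custom logic or configuration into the SDK's request lifecycle" (custom headers, retry tweaks), then call Send, after which the output value becomes valid. A sketch using DescribeHostsRequest, assuming a configured client; the header name is purely illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Build the request without sending it; out is not valid until
	// Send returns without error.
	req, out := svc.DescribeHostsRequest(&ec2.DescribeHostsInput{})

	// Example of injecting custom configuration before the call is made.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // illustrative header

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}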
@@ -9509,7 +9990,7 @@ func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Requ // // The results describe only the Dedicated Hosts in the region you're currently // using. All listed instances consume capacity on your Dedicated Host. Dedicated -// Hosts that have recently been released will be listed with the state released. +// Hosts that have recently been released are listed with the state released. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9543,7 +10024,7 @@ const opDescribeIamInstanceProfileAssociations = "DescribeIamInstanceProfileAsso // DescribeIamInstanceProfileAssociationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeIamInstanceProfileAssociations operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9617,7 +10098,7 @@ const opDescribeIdFormat = "DescribeIdFormat" // DescribeIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeIdFormat operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9662,8 +10143,13 @@ func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *reques // request only returns information about resource types whose ID formats can // be modified; it does not return information about other resource types. // -// The following resource types support longer IDs: instance | reservation | -// snapshot | volume. +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. // // These settings apply to the IAM user who makes the request; they do not apply // to the entire AWS account. By default, an IAM user defaults to the same settings @@ -9704,7 +10190,7 @@ const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat" // DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeIdentityIdFormat operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9751,8 +10237,13 @@ func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInp // other resource types. 
For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) // in the Amazon Elastic Compute Cloud User Guide. // -// The following resource types support longer IDs: instance | reservation | -// snapshot | volume. +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. // // These settings apply to the principal specified in the request. They do not // apply to the principal that makes the request. @@ -9789,7 +10280,7 @@ const opDescribeImageAttribute = "DescribeImageAttribute" // DescribeImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeImageAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9864,7 +10355,7 @@ const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the // client's request for the DescribeImages operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -9944,7 +10435,7 @@ const opDescribeImportImageTasks = "DescribeImportImageTasks" // DescribeImportImageTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeImportImageTasks operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10019,7 +10510,7 @@ const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" // DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeImportSnapshotTasks operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10093,7 +10584,7 @@ const opDescribeInstanceAttribute = "DescribeInstanceAttribute" // DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. @@ -10171,7 +10662,7 @@ const opDescribeInstanceCreditSpecifications = "DescribeInstanceCreditSpecificat // DescribeInstanceCreditSpecificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceCreditSpecifications operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10263,7 +10754,7 @@ const opDescribeInstanceStatus = "DescribeInstanceStatus" // DescribeInstanceStatusRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceStatus operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10414,7 +10905,7 @@ const opDescribeInstances = "DescribeInstances" // DescribeInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10559,7 +11050,7 @@ const opDescribeInternetGateways = "DescribeInternetGateways" // DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeInternetGateways operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10599,7 +11090,7 @@ func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInp // DescribeInternetGateways API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your Internet gateways. +// Describes one or more of your internet gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10633,7 +11124,7 @@ const opDescribeKeyPairs = "DescribeKeyPairs" // DescribeKeyPairsRequest generates a "aws/request.Request" representing the // client's request for the DescribeKeyPairs operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10710,7 +11201,7 @@ const opDescribeLaunchTemplateVersions = "DescribeLaunchTemplateVersions" // DescribeLaunchTemplateVersionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeLaunchTemplateVersions operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10785,7 +11276,7 @@ const opDescribeLaunchTemplates = "DescribeLaunchTemplates" // DescribeLaunchTemplatesRequest generates a "aws/request.Request" representing the // client's request for the DescribeLaunchTemplates operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10859,7 +11350,7 @@ const opDescribeMovingAddresses = "DescribeMovingAddresses" // DescribeMovingAddressesRequest generates a "aws/request.Request" representing the // client's request for the DescribeMovingAddresses operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10935,7 +11426,7 @@ const opDescribeNatGateways = "DescribeNatGateways" // DescribeNatGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeNatGateways operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -10981,7 +11472,7 @@ func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req * // DescribeNatGateways API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of the your NAT gateways. +// Describes one or more of your NAT gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11065,7 +11556,7 @@ const opDescribeNetworkAcls = "DescribeNetworkAcls" // DescribeNetworkAclsRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkAcls operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11107,7 +11598,7 @@ func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req * // // Describes one or more of your network ACLs. // -// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -11142,7 +11633,7 @@ const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" // DescribeNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11217,7 +11708,7 @@ const opDescribeNetworkInterfacePermissions = "DescribeNetworkInterfacePermissio // DescribeNetworkInterfacePermissionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfacePermissions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11291,7 +11782,7 @@ const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" // DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfaces operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11365,7 +11856,7 @@ const opDescribePlacementGroups = "DescribePlacementGroups" // DescribePlacementGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribePlacementGroups operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11441,7 +11932,7 @@ const opDescribePrefixLists = "DescribePrefixLists" // DescribePrefixListsRequest generates a "aws/request.Request" representing the // client's request for the DescribePrefixLists operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11485,7 +11976,8 @@ func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req * // the prefix list name and prefix list ID of the service and the IP address // range for the service. A prefix list ID is required for creating an outbound // security group rule that allows traffic from a VPC to access an AWS service -// through a gateway VPC endpoint. +// through a gateway VPC endpoint. Currently, the services that support this +// action are Amazon S3 and Amazon DynamoDB. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11515,11 +12007,99 @@ func (c *EC2) DescribePrefixListsWithContext(ctx aws.Context, input *DescribePre return out, req.Send() } +const opDescribePrincipalIdFormat = "DescribePrincipalIdFormat" + +// DescribePrincipalIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribePrincipalIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePrincipalIdFormat for more information on using the DescribePrincipalIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePrincipalIdFormatRequest method. +// req, resp := client.DescribePrincipalIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat +func (c *EC2) DescribePrincipalIdFormatRequest(input *DescribePrincipalIdFormatInput) (req *request.Request, output *DescribePrincipalIdFormatOutput) { + op := &request.Operation{ + Name: opDescribePrincipalIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePrincipalIdFormatInput{} + } + + output = &DescribePrincipalIdFormatOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePrincipalIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Describes the ID format settings for the root user and all IAM roles and +// IAM users that have explicitly specified a longer ID (17-character ID) preference. +// +// By default, all IAM roles and IAM users default to the same ID settings as +// the root user, unless they explicitly override the settings. This request +// is useful for identifying those IAM users and IAM roles that have overridden +// the default ID settings. +// +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribePrincipalIdFormat for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat +func (c *EC2) DescribePrincipalIdFormat(input *DescribePrincipalIdFormatInput) (*DescribePrincipalIdFormatOutput, error) { + req, out := c.DescribePrincipalIdFormatRequest(input) + return out, req.Send() +} + +// DescribePrincipalIdFormatWithContext is the same as DescribePrincipalIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePrincipalIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePrincipalIdFormatWithContext(ctx aws.Context, input *DescribePrincipalIdFormatInput, opts ...request.Option) (*DescribePrincipalIdFormatOutput, error) { + req, out := c.DescribePrincipalIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeRegions = "DescribeRegions" // DescribeRegionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeRegions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11596,7 +12176,7 @@ const opDescribeReservedInstances = "DescribeReservedInstances" // DescribeReservedInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11673,7 +12253,7 @@ const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" // DescribeReservedInstancesListingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesListings operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11768,7 +12348,7 @@ const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModif // DescribeReservedInstancesModificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesModifications operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -11904,7 +12484,7 @@ const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings // DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesOfferings operation. 
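A sketch of the WithContext variant introduced in this hunk, assuming a placeholder region; the Resources and Principals field names are my assumption rather than text quoted from the patch, and the non-nil-context rule documented above is why a real context is always passed:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Cancelling or timing out the context aborts the in-flight request.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.DescribePrincipalIdFormatWithContext(ctx, &ec2.DescribePrincipalIdFormatInput{
		Resources: []*string{aws.String("instance")}, // assumed field; any supported resource type works
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.Principals {
		fmt.Println(aws.StringValue(p.Arn))
	}
}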
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12045,7 +12625,7 @@ const opDescribeRouteTables = "DescribeRouteTables" // DescribeRouteTablesRequest generates a "aws/request.Request" representing the // client's request for the DescribeRouteTables operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12092,7 +12672,7 @@ func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req * // with the main route table. This command does not return the subnet ID for // implicit associations. // -// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -12127,7 +12707,7 @@ const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvaila // DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledInstanceAvailability operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12209,7 +12789,7 @@ const opDescribeScheduledInstances = "DescribeScheduledInstances" // DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12283,7 +12863,7 @@ const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences" // DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSecurityGroupReferences operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12358,7 +12938,7 @@ const opDescribeSecurityGroups = "DescribeSecurityGroups" // DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSecurityGroups operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. @@ -12439,7 +13019,7 @@ const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" // DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshotAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12517,7 +13097,7 @@ const opDescribeSnapshots = "DescribeSnapshots" // DescribeSnapshotsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshots operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12692,7 +13272,7 @@ const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" // DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotDatafeedSubscription operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12734,7 +13314,7 @@ func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafee // // Describes the data feed for Spot Instances. For more information, see Spot // Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12768,7 +13348,7 @@ const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" // DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12842,7 +13422,7 @@ const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" // DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetRequestHistory operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -12921,7 +13501,7 @@ const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" // DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetRequests operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13054,7 +13634,7 @@ const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" // DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotInstanceRequests operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13094,11 +13674,7 @@ func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceReq // DescribeSpotInstanceRequests API operation for Amazon Elastic Compute Cloud. // -// Describes the Spot Instance requests that belong to your account. Spot Instances -// are instances that Amazon EC2 launches when the Spot price that you specify -// exceeds the current Spot price. For more information, see Spot Instance Requests -// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) in -// the Amazon Elastic Compute Cloud User Guide. +// Describes the specified Spot Instance requests. // // You can use DescribeSpotInstanceRequests to find a running Spot Instance // by examining the response. If the status of the Spot Instance is fulfilled, @@ -13106,8 +13682,8 @@ func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceReq // instance. Alternatively, you can use DescribeInstances with a filter to look // for instances where the instance lifecycle is spot. // -// Spot Instance requests are deleted 4 hours after they are canceled and their -// instances are terminated. +// Spot Instance requests are deleted four hours after they are canceled and +// their instances are terminated. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13141,7 +13717,7 @@ const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" // DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotPriceHistory operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13189,7 +13765,7 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // // Describes the Spot price history. For more information, see Spot Instance // Pricing History (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide for Linux Instances. 
// // When you specify a start and end time, this operation returns the prices // of the instance types within the time range that you specified and the time @@ -13278,7 +13854,7 @@ const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" // DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeStaleSecurityGroups operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13355,7 +13931,7 @@ const opDescribeSubnets = "DescribeSubnets" // DescribeSubnetsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSubnets operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13397,7 +13973,7 @@ func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request. // // Describes one or more of your subnets. // -// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// For more information, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -13432,7 +14008,7 @@ const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTags operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13565,7 +14141,7 @@ const opDescribeVolumeAttribute = "DescribeVolumeAttribute" // DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumeAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13643,7 +14219,7 @@ const opDescribeVolumeStatus = "DescribeVolumeStatus" // DescribeVolumeStatusRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumeStatus operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13706,8 +14282,9 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req // status of the volume is ok. If the check fails, the overall status is impaired. // If the status is insufficient-data, then the checks may still be taking place // on your volume at the time. 
We recommend that you retry the request. For -// more information on volume status, see Monitoring the Status of Your Volumes -// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html). +// more information about volume status, see Monitoring the Status of Your Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html) +// in the Amazon Elastic Compute Cloud User Guide. // // Events: Reflect the cause of a volume status and may require you to take // action. For example, if your volume returns an impaired status, then the @@ -13807,7 +14384,7 @@ const opDescribeVolumes = "DescribeVolumes" // DescribeVolumesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumes operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13947,7 +14524,7 @@ const opDescribeVolumesModifications = "DescribeVolumesModifications" // DescribeVolumesModificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumesModifications operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -13999,7 +14576,8 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica // You can also use CloudWatch Events to check the status of a modification // to an EBS volume. For information about CloudWatch Events, see the Amazon // CloudWatch Events User Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). -// For more information, see Monitoring Volume Modifications" (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). +// For more information, see Monitoring Volume Modifications" (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods) +// in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14033,7 +14611,7 @@ const opDescribeVpcAttribute = "DescribeVpcAttribute" // DescribeVpcAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14108,7 +14686,7 @@ const opDescribeVpcClassicLink = "DescribeVpcClassicLink" // DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcClassicLink operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
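The ok / impaired / insufficient-data status values described above for DescribeVolumeStatus can be read straight off the response; a sketch, assuming a placeholder region and volume ID:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.DescribeVolumeStatus(&ec2.DescribeVolumeStatusInput{
		VolumeIds: []*string{aws.String("vol-0123456789abcdef0")}, // placeholder ID
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range out.VolumeStatuses {
		if item.VolumeStatus == nil {
			continue
		}
		// Status is "ok", "impaired", or "insufficient-data", as described above.
		fmt.Println(aws.StringValue(item.VolumeId), aws.StringValue(item.VolumeStatus.Status))
	}
}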
@@ -14182,7 +14760,7 @@ const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" // DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcClassicLinkDnsSupport operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14262,7 +14840,7 @@ const opDescribeVpcEndpointConnectionNotifications = "DescribeVpcEndpointConnect // DescribeVpcEndpointConnectionNotificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointConnectionNotifications operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14337,7 +14915,7 @@ const opDescribeVpcEndpointConnections = "DescribeVpcEndpointConnections" // DescribeVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointConnections operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14412,7 +14990,7 @@ const opDescribeVpcEndpointServiceConfigurations = "DescribeVpcEndpointServiceCo // DescribeVpcEndpointServiceConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServiceConfigurations operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14486,7 +15064,7 @@ const opDescribeVpcEndpointServicePermissions = "DescribeVpcEndpointServicePermi // DescribeVpcEndpointServicePermissionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServicePermissions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14561,7 +15139,7 @@ const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" // DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServices operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
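Of the VPC endpoint operations touched in these hunks, DescribeVpcEndpointServices is the simplest to exercise; a sketch that lists the service names reachable through a VPC endpoint, assuming a placeholder region:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.DescribeVpcEndpointServices(&ec2.DescribeVpcEndpointServicesInput{})
	if err != nil {
		log.Fatal(err)
	}
	// ServiceNames lists the services that support VPC endpoints in the
	// region the client is configured for.
	for _, name := range out.ServiceNames {
		fmt.Println(aws.StringValue(name))
	}
}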
@@ -14635,7 +15213,7 @@ const opDescribeVpcEndpoints = "DescribeVpcEndpoints" // DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpoints operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14709,7 +15287,7 @@ const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" // DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcPeeringConnections operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14783,7 +15361,7 @@ const opDescribeVpcs = "DescribeVpcs" // DescribeVpcsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcs operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14857,7 +15435,7 @@ const opDescribeVpnConnections = "DescribeVpnConnections" // DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpnConnections operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -14935,7 +15513,7 @@ const opDescribeVpnGateways = "DescribeVpnGateways" // DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpnGateways operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15013,7 +15591,7 @@ const opDetachClassicLinkVpc = "DetachClassicLinkVpc" // DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the // client's request for the DetachClassicLinkVpc operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15089,7 +15667,7 @@ const opDetachInternetGateway = "DetachInternetGateway" // DetachInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DetachInternetGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
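For the plain Describe calls in this stretch, such as DescribeVpcs, an empty input returns everything visible to the caller; a sketch, assuming a placeholder region, that also shows the aws.StringValue / aws.BoolValue helpers for the pointer-typed output fields:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.DescribeVpcs(&ec2.DescribeVpcsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, vpc := range out.Vpcs {
		// The helpers dereference the pointer fields safely, returning zero values for nil.
		fmt.Printf("%s %s default=%t\n",
			aws.StringValue(vpc.VpcId), aws.StringValue(vpc.CidrBlock), aws.BoolValue(vpc.IsDefault))
	}
}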
@@ -15131,8 +15709,8 @@ func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (r // DetachInternetGateway API operation for Amazon Elastic Compute Cloud. // -// Detaches an Internet gateway from a VPC, disabling connectivity between the -// Internet and the VPC. The VPC must not contain any running instances with +// Detaches an internet gateway from a VPC, disabling connectivity between the +// internet and the VPC. The VPC must not contain any running instances with // Elastic IP addresses or public IPv4 addresses. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -15167,7 +15745,7 @@ const opDetachNetworkInterface = "DetachNetworkInterface" // DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the DetachNetworkInterface operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15243,7 +15821,7 @@ const opDetachVolume = "DetachVolume" // DetachVolumeRequest generates a "aws/request.Request" representing the // client's request for the DetachVolume operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15330,7 +15908,7 @@ const opDetachVpnGateway = "DetachVpnGateway" // DetachVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the DetachVpnGateway operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15413,7 +15991,7 @@ const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" // DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the // client's request for the DisableVgwRoutePropagation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15490,7 +16068,7 @@ const opDisableVpcClassicLink = "DisableVpcClassicLink" // DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the DisableVpcClassicLink operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15565,7 +16143,7 @@ const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" // DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the DisableVpcClassicLinkDnsSupport operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15607,8 +16185,8 @@ func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLin // // Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve // to public IP addresses when addressed between a linked EC2-Classic instance -// and instances in the VPC to which it's linked. For more information about -// ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// and instances in the VPC to which it's linked. For more information, see +// ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -15643,7 +16221,7 @@ const opDisassociateAddress = "DisassociateAddress" // DisassociateAddressRequest generates a "aws/request.Request" representing the // client's request for the DisassociateAddress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15727,7 +16305,7 @@ const opDisassociateIamInstanceProfile = "DisassociateIamInstanceProfile" // DisassociateIamInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the DisassociateIamInstanceProfile operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15803,7 +16381,7 @@ const opDisassociateRouteTable = "DisassociateRouteTable" // DisassociateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the DisassociateRouteTable operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15884,7 +16462,7 @@ const opDisassociateSubnetCidrBlock = "DisassociateSubnetCidrBlock" // DisassociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the DisassociateSubnetCidrBlock operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -15960,7 +16538,7 @@ const opDisassociateVpcCidrBlock = "DisassociateVpcCidrBlock" // DisassociateVpcCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the DisassociateVpcCidrBlock operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16040,7 +16618,7 @@ const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" // EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the // client's request for the EnableVgwRoutePropagation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16117,7 +16695,7 @@ const opEnableVolumeIO = "EnableVolumeIO" // EnableVolumeIORequest generates a "aws/request.Request" representing the // client's request for the EnableVolumeIO operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16194,7 +16772,7 @@ const opEnableVpcClassicLink = "EnableVpcClassicLink" // EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the EnableVpcClassicLink operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16236,7 +16814,7 @@ func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req // // Enables a VPC for ClassicLink. You can then link EC2-Classic instances to // your ClassicLink-enabled VPC to allow communication over private IP addresses. -// You cannot enable your VPC for ClassicLink if any of your VPC's route tables +// You cannot enable your VPC for ClassicLink if any of your VPC route tables // have existing routes for address ranges within the 10.0.0.0/8 IP address // range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 // IP address ranges. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) @@ -16274,7 +16852,7 @@ const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" // EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16318,8 +16896,8 @@ func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkD // the DNS hostname of a linked EC2-Classic instance resolves to its private // IP address when addressed from an instance in the VPC to which it's linked. // Similarly, the DNS hostname of an instance in a VPC resolves to its private -// IP address when addressed from a linked EC2-Classic instance. 
For more information -// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// IP address when addressed from a linked EC2-Classic instance. For more information, +// see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16354,7 +16932,7 @@ const opGetConsoleOutput = "GetConsoleOutput" // GetConsoleOutputRequest generates a "aws/request.Request" representing the // client's request for the GetConsoleOutput operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16394,24 +16972,23 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques // GetConsoleOutput API operation for Amazon Elastic Compute Cloud. // -// Gets the console output for the specified instance. -// -// Instances do not have a physical monitor through which you can view their -// console output. They also lack physical controls that allow you to power -// up, reboot, or shut them down. To allow these actions, we provide them through -// the Amazon EC2 API and command line interface. +// Gets the console output for the specified instance. For Linux instances, +// the instance console output displays the exact console output that would +// normally be displayed on a physical monitor attached to a computer. For Windows +// instances, the instance console output includes the last three system event +// log errors. // -// Instance console output is buffered and posted shortly after instance boot, -// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output, -// which is available for at least one hour after the most recent post. +// By default, the console output returns buffered information that was posted +// shortly after an instance transition state (start, stop, reboot, or terminate). +// This information is available for at least one hour after the most recent +// post. Only the most recent 64 KB of console output is available. // -// For Linux instances, the instance console output displays the exact console -// output that would normally be displayed on a physical monitor attached to -// a computer. This output is buffered because the instance produces it and -// then posts it to a store where the instance's owner can retrieve it. +// You can optionally retrieve the latest serial console output at any time +// during the instance lifecycle. This option is supported on instance types +// that use the Nitro hypervisor. // -// For Windows instances, the instance console output includes output from the -// EC2Config service. +// For more information, see Instance Console Output (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html#instance-console-console-output) +// in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
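The console output returned by GetConsoleOutput arrives base64-encoded in the Output field, so it has to be decoded before it is readable; a sketch, assuming a placeholder region and instance ID:

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder ID
	})
	if err != nil {
		log.Fatal(err)
	}
	// Output is base64-encoded; decode it before printing. It may be empty if
	// the instance has not posted console output yet.
	decoded, err := base64.StdEncoding.DecodeString(aws.StringValue(out.Output))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(decoded))
}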
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16445,7 +17022,7 @@ const opGetConsoleScreenshot = "GetConsoleScreenshot" // GetConsoleScreenshotRequest generates a "aws/request.Request" representing the // client's request for the GetConsoleScreenshot operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16521,7 +17098,7 @@ const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" // GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the // client's request for the GetHostReservationPurchasePreview operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16600,7 +17177,7 @@ const opGetLaunchTemplateData = "GetLaunchTemplateData" // GetLaunchTemplateDataRequest generates a "aws/request.Request" representing the // client's request for the GetLaunchTemplateData operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16675,7 +17252,7 @@ const opGetPasswordData = "GetPasswordData" // GetPasswordDataRequest generates a "aws/request.Request" representing the // client's request for the GetPasswordData operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16766,7 +17343,7 @@ const opGetReservedInstancesExchangeQuote = "GetReservedInstancesExchangeQuote" // GetReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the // client's request for the GetReservedInstancesExchangeQuote operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16843,7 +17420,7 @@ const opImportImage = "ImportImage" // ImportImageRequest generates a "aws/request.Request" representing the // client's request for the ImportImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -16920,7 +17497,7 @@ const opImportInstance = "ImportInstance" // ImportInstanceRequest generates a "aws/request.Request" representing the // client's request for the ImportInstance operation. 
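Every operation in this file documents the same error contract: service and SDK failures come back as an awserr.Error whose Code and Message methods carry the details. A sketch of that type assertion, using GetPasswordData from this hunk with a placeholder region and instance ID:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	_, err := svc.GetPasswordData(&ec2.GetPasswordDataInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder ID
	})
	if err != nil {
		// Service and SDK errors implement awserr.Error; Code and Message
		// expose the structured details mentioned in the generated docs.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Println("code:", aerr.Code(), "message:", aerr.Message())
			return
		}
		log.Fatal(err)
	}
}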
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17000,7 +17577,7 @@ const opImportKeyPair = "ImportKeyPair" // ImportKeyPairRequest generates a "aws/request.Request" representing the // client's request for the ImportKeyPair operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17081,7 +17658,7 @@ const opImportSnapshot = "ImportSnapshot" // ImportSnapshotRequest generates a "aws/request.Request" representing the // client's request for the ImportSnapshot operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17155,7 +17732,7 @@ const opImportVolume = "ImportVolume" // ImportVolumeRequest generates a "aws/request.Request" representing the // client's request for the ImportVolume operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17229,11 +17806,87 @@ func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput, return out, req.Send() } +const opModifyFleet = "ModifyFleet" + +// ModifyFleetRequest generates a "aws/request.Request" representing the +// client's request for the ModifyFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyFleet for more information on using the ModifyFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyFleetRequest method. +// req, resp := client.ModifyFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFleet +func (c *EC2) ModifyFleetRequest(input *ModifyFleetInput) (req *request.Request, output *ModifyFleetOutput) { + op := &request.Operation{ + Name: opModifyFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyFleetInput{} + } + + output = &ModifyFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyFleet API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified EC2 Fleet. +// +// While the EC2 Fleet is being modified, it is in the modifying state. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyFleet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFleet +func (c *EC2) ModifyFleet(input *ModifyFleetInput) (*ModifyFleetOutput, error) { + req, out := c.ModifyFleetRequest(input) + return out, req.Send() +} + +// ModifyFleetWithContext is the same as ModifyFleet with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyFleetWithContext(ctx aws.Context, input *ModifyFleetInput, opts ...request.Option) (*ModifyFleetOutput, error) { + req, out := c.ModifyFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyFpgaImageAttribute = "ModifyFpgaImageAttribute" // ModifyFpgaImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyFpgaImageAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17307,7 +17960,7 @@ const opModifyHosts = "ModifyHosts" // ModifyHostsRequest generates a "aws/request.Request" representing the // client's request for the ModifyHosts operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17348,12 +18001,12 @@ func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, // ModifyHosts API operation for Amazon Elastic Compute Cloud. // // Modify the auto-placement setting of a Dedicated Host. When auto-placement -// is enabled, AWS will place instances that you launch with a tenancy of host, -// but without targeting a specific host ID, onto any available Dedicated Host -// in your account which has auto-placement enabled. When auto-placement is -// disabled, you need to provide a host ID if you want the instance to launch -// onto a specific host. If no host ID is provided, the instance will be launched -// onto a suitable host which has auto-placement enabled. +// is enabled, any instances that you launch with a tenancy of host but without +// a specific host ID are placed onto any available Dedicated Host in your account +// that has auto-placement enabled. When auto-placement is disabled, you need +// to provide a host ID ito have the instance launch onto a specific host. If +// no host ID is provided, the instance is launched onto a suitable host with +// auto-placement enabled. // // Returns awserr.Error for service API and SDK errors. 
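The reworded ModifyHosts documentation above is about toggling auto-placement on a Dedicated Host; a sketch, assuming a placeholder region and host ID:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// "on" enables auto-placement so untargeted host-tenancy launches can land
	// on this host; "off" requires an explicit host ID at launch.
	out, err := svc.ModifyHosts(&ec2.ModifyHostsInput{
		HostIds:       []*string{aws.String("h-0123456789abcdef0")}, // placeholder ID
		AutoPlacement: aws.String("on"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("modified:", len(out.Successful), "failed:", len(out.Unsuccessful))
}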
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17387,7 +18040,7 @@ const opModifyIdFormat = "ModifyIdFormat" // ModifyIdFormatRequest generates a "aws/request.Request" representing the // client's request for the ModifyIdFormat operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17431,8 +18084,16 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re // // Modifies the ID format for the specified resource on a per-region basis. // You can specify that resources should receive longer IDs (17-character IDs) -// when they are created. The following resource types support longer IDs: instance -// | reservation | snapshot | volume. +// when they are created. +// +// This request can only be used to modify longer ID settings for resource types +// that are within the opt-in period. Resources currently in their opt-in period +// include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation +// | elastic-ip-association | export-task | flow-log | image | import-task | +// internet-gateway | network-acl | network-acl-association | network-interface +// | network-interface-attachment | prefix-list | route-table | route-table-association +// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. // // This setting applies to the IAM user who makes the request; it does not apply // to the entire AWS account. By default, an IAM user defaults to the same settings @@ -17477,7 +18138,7 @@ const opModifyIdentityIdFormat = "ModifyIdentityIdFormat" // ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the // client's request for the ModifyIdentityIdFormat operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17524,8 +18185,16 @@ func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput) // user for an account. You can specify that resources should receive longer // IDs (17-character IDs) when they are created. // -// The following resource types support longer IDs: instance | reservation | -// snapshot | volume. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// This request can only be used to modify longer ID settings for resource types +// that are within the opt-in period. Resources currently in their opt-in period +// include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation +// | elastic-ip-association | export-task | flow-log | image | import-task | +// internet-gateway | network-acl | network-acl-association | network-interface +// | network-interface-attachment | prefix-list | route-table | route-table-association +// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. 
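Opting in to the longer 17-character IDs discussed above is a per-resource-type, per-region call; a sketch using ModifyIdFormat for the calling identity, assuming a placeholder region and one resource type from the opt-in list:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Opts the calling IAM identity in to longer IDs for one resource type;
	// repeat per resource type and per region as needed.
	if _, err := svc.ModifyIdFormat(&ec2.ModifyIdFormatInput{
		Resource:   aws.String("route-table"),
		UseLongIds: aws.Bool(true),
	}); err != nil {
		log.Fatal(err)
	}
}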
+// +// For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) // in the Amazon Elastic Compute Cloud User Guide. // // This setting applies to the principal specified in the request; it does not @@ -17567,7 +18236,7 @@ const opModifyImageAttribute = "ModifyImageAttribute" // ModifyImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyImageAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17652,7 +18321,7 @@ const opModifyInstanceAttribute = "ModifyInstanceAttribute" // ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstanceAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17697,6 +18366,12 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput // Modifies the specified attribute of the specified instance. You can specify // only one attribute at a time. // +// Note: Using this action to change the security groups associated with an +// elastic network interface (ENI) attached to an instance in a VPC can result +// in an error if the instance has more than one ENI. To change the security +// groups associated with an ENI attached to an instance that has multiple ENIs, +// we recommend that you use the ModifyNetworkInterfaceAttribute action. +// // To modify some attributes, the instance must be stopped. For more information, // see Modifying Attributes of a Stopped Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -17733,7 +18408,7 @@ const opModifyInstanceCreditSpecification = "ModifyInstanceCreditSpecification" // ModifyInstanceCreditSpecificationRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstanceCreditSpecification operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17811,7 +18486,7 @@ const opModifyInstancePlacement = "ModifyInstancePlacement" // ModifyInstancePlacementRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstancePlacement operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17851,25 +18526,28 @@ func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput // ModifyInstancePlacement API operation for Amazon Elastic Compute Cloud. // -// Set the instance affinity value for a specific stopped instance and modify -// the instance tenancy setting. 
+// Modifies the placement attributes for a specified instance. You can do the +// following: +// +// * Modify the affinity between an instance and a Dedicated Host (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html). +// When affinity is set to host and the instance is not associated with a +// specific Dedicated Host, the next time the instance is launched, it is +// automatically associated with the host on which it lands. If the instance +// is restarted or rebooted, this relationship persists. +// +// * Change the Dedicated Host with which an instance is associated. // -// Instance affinity is disabled by default. When instance affinity is host -// and it is not associated with a specific Dedicated Host, the next time it -// is launched it will automatically be associated with the host it lands on. -// This relationship will persist if the instance is stopped/started, or rebooted. +// * Change the instance tenancy of an instance from host to dedicated, or +// from dedicated to host. // -// You can modify the host ID associated with a stopped instance. If a stopped -// instance has a new host ID association, the instance will target that host -// when restarted. +// * Move an instance to or from a placement group (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html). // -// You can modify the tenancy of a stopped instance with a tenancy of host or -// dedicated. +// At least one attribute for affinity, host ID, tenancy, or placement group +// name must be specified in the request. Affinity and tenancy can be modified +// in the same request. // -// Affinity, hostID, and tenancy are not required parameters, but at least one -// of them must be specified in the request. Affinity and tenancy can be modified -// in the same request, but tenancy can only be modified on instances that are -// stopped. +// To modify the host ID, tenancy, or placement group for an instance, the instance +// must be in the stopped state. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17903,7 +18581,7 @@ const opModifyLaunchTemplate = "ModifyLaunchTemplate" // ModifyLaunchTemplateRequest generates a "aws/request.Request" representing the // client's request for the ModifyLaunchTemplate operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -17979,7 +18657,7 @@ const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" // ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18056,7 +18734,7 @@ const opModifyReservedInstances = "ModifyReservedInstances" // ModifyReservedInstancesRequest generates a "aws/request.Request" representing the // client's request for the ModifyReservedInstances operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18136,7 +18814,7 @@ const opModifySnapshotAttribute = "ModifySnapshotAttribute" // ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifySnapshotAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18188,7 +18866,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // be made public. Snapshots encrypted with your default CMK cannot be shared // with other accounts. // -// For more information on modifying snapshot permissions, see Sharing Snapshots +// For more information about modifying snapshot permissions, see Sharing Snapshots // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -18224,7 +18902,7 @@ const opModifySpotFleetRequest = "ModifySpotFleetRequest" // ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the // client's request for the ModifySpotFleetRequest operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18320,7 +18998,7 @@ const opModifySubnetAttribute = "ModifySubnetAttribute" // ModifySubnetAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifySubnetAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18396,7 +19074,7 @@ const opModifyVolume = "ModifyVolume" // ModifyVolumeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVolume operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18461,10 +19139,9 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques // // With previous-generation instance types, resizing an EBS volume may require // detaching and reattaching the volume or stopping and restarting the instance. -// For more information about modifying an EBS volume running Linux, see Modifying -// the Size, IOPS, or Type of an EBS Volume on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). -// For more information about modifying an EBS volume running Windows, see Modifying -// the Size, IOPS, or Type of an EBS Volume on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). 
+// For more information, see Modifying the Size, IOPS, or Type of an EBS Volume +// on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html) +// and Modifying the Size, IOPS, or Type of an EBS Volume on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). // // If you reach the maximum volume modification rate per volume limit, you will // need to wait at least six hours before applying further modifications to @@ -18502,7 +19179,7 @@ const opModifyVolumeAttribute = "ModifyVolumeAttribute" // ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVolumeAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18587,7 +19264,7 @@ const opModifyVpcAttribute = "ModifyVpcAttribute" // ModifyVpcAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18663,7 +19340,7 @@ const opModifyVpcEndpoint = "ModifyVpcEndpoint" // ModifyVpcEndpointRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpoint operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18740,7 +19417,7 @@ const opModifyVpcEndpointConnectionNotification = "ModifyVpcEndpointConnectionNo // ModifyVpcEndpointConnectionNotificationRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpointConnectionNotification operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18816,7 +19493,7 @@ const opModifyVpcEndpointServiceConfiguration = "ModifyVpcEndpointServiceConfigu // ModifyVpcEndpointServiceConfigurationRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpointServiceConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18893,7 +19570,7 @@ const opModifyVpcEndpointServicePermissions = "ModifyVpcEndpointServicePermissio // ModifyVpcEndpointServicePermissionsRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpointServicePermissions operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -18933,9 +19610,14 @@ func (c *EC2) ModifyVpcEndpointServicePermissionsRequest(input *ModifyVpcEndpoin // ModifyVpcEndpointServicePermissions API operation for Amazon Elastic Compute Cloud. // -// Modifies the permissions for your VPC endpoint service. You can add or remove -// permissions for service consumers (IAM users, IAM roles, and AWS accounts) -// to discover your endpoint service. +// Modifies the permissions for your VPC endpoint service (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/endpoint-service.html). +// You can add or remove permissions for service consumers (IAM users, IAM roles, +// and AWS accounts) to connect to your endpoint service. +// +// If you grant permissions to all principals, the service is public. Any users +// who know the name of a public service can send a request to attach an endpoint. +// If the service does not require manual approval, attachments are automatically +// approved. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -18969,7 +19651,7 @@ const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions" // ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19019,7 +19701,7 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo // * Enable/disable communication over the peering connection between instances // in your VPC and an EC2-Classic instance that's linked to the peer VPC. // -// * Enable/disable a local VPC to resolve public DNS hostnames to private +// * Enable/disable the ability to resolve public DNS hostnames to private // IP addresses when queried from instances in the peer VPC. // // If the peered VPCs are in different accounts, each owner must initiate a @@ -19062,7 +19744,7 @@ const opModifyVpcTenancy = "ModifyVpcTenancy" // ModifyVpcTenancyRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcTenancy operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19110,7 +19792,7 @@ func (c *EC2) ModifyVpcTenancyRequest(input *ModifyVpcTenancyInput) (req *reques // into the VPC have a tenancy of default, unless you specify otherwise during // launch. The tenancy of any existing instances in the VPC is not affected. 
// -// For more information about Dedicated Instances, see Dedicated Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) +// For more information, see Dedicated Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -19145,7 +19827,7 @@ const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the // client's request for the MonitorInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19224,7 +19906,7 @@ const opMoveAddressToVpc = "MoveAddressToVpc" // MoveAddressToVpcRequest generates a "aws/request.Request" representing the // client's request for the MoveAddressToVpc operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19304,7 +19986,7 @@ const opPurchaseHostReservation = "PurchaseHostReservation" // PurchaseHostReservationRequest generates a "aws/request.Request" representing the // client's request for the PurchaseHostReservation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19381,7 +20063,7 @@ const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" // PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the // client's request for the PurchaseReservedInstancesOffering operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19464,7 +20146,7 @@ const opPurchaseScheduledInstances = "PurchaseScheduledInstances" // PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the PurchaseScheduledInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19547,7 +20229,7 @@ const opRebootInstances = "RebootInstances" // RebootInstancesRequest generates a "aws/request.Request" representing the // client's request for the RebootInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
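// Illustrative sketch, not part of the vendored diff: the hunks above repeatedly carry the
// SDK's guidance that each operation returns awserr.Error (use a runtime type assertion
// against its Code and Message methods) and that the *WithContext variants, such as
// ModifyFleetWithContext shown earlier in this file, take a non-nil context for cancellation.
// The helper below only demonstrates that calling pattern; the helper name, timeout, and the
// assumption of an already-built *ec2.ModifyFleetInput and configured client are the editor's.
package example

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func modifyFleetWithTimeout(client *ec2.EC2, input *ec2.ModifyFleetInput) (*ec2.ModifyFleetOutput, error) {
	// The context must be non-nil; the SDK panics on a nil context.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := client.ModifyFleetWithContext(ctx, input)
	if err != nil {
		// Runtime type assertion, as the generated documentation above recommends.
		if aerr, ok := err.(awserr.Error); ok {
			log.Printf("ModifyFleet failed: code=%s message=%s", aerr.Code(), aerr.Message())
		}
		return nil, err
	}
	return out, nil
}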
@@ -19633,7 +20315,7 @@ const opRegisterImage = "RegisterImage" // RegisterImageRequest generates a "aws/request.Request" representing the // client's request for the RegisterImage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19693,10 +20375,13 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE // Linux Enterprise Server (SLES), use the EC2 billing product code associated // with an AMI to verify the subscription status for package updates. Creating -// an AMI from an EBS snapshot does not maintain this billing code, and subsequent -// instances launched from such an AMI will not be able to connect to package -// update infrastructure. To create an AMI that must retain billing codes, see -// CreateImage. +// an AMI from an EBS snapshot does not maintain this billing code, and instances +// launched from such an AMI are not able to connect to package update infrastructure. +// If you purchase a Reserved Instance offering for one of these Linux distributions +// and launch instances using an AMI that does not contain the required billing +// code, your Reserved Instance is not applied to these instances. +// +// To create an AMI for operating systems that require a billing code, see CreateImage. // // If needed, you can deregister an AMI at any time. Any modifications you make // to an AMI backed by an instance store volume invalidates its registration. @@ -19735,7 +20420,7 @@ const opRejectVpcEndpointConnections = "RejectVpcEndpointConnections" // RejectVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the // client's request for the RejectVpcEndpointConnections operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19810,7 +20495,7 @@ const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" // RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the RejectVpcPeeringConnection operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19888,7 +20573,7 @@ const opReleaseAddress = "ReleaseAddress" // ReleaseAddressRequest generates a "aws/request.Request" representing the // client's request for the ReleaseAddress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -19981,7 +20666,7 @@ const opReleaseHosts = "ReleaseHosts" // ReleaseHostsRequest generates a "aws/request.Request" representing the // client's request for the ReleaseHosts operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20024,15 +20709,14 @@ func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Reques // When you no longer want to use an On-Demand Dedicated Host it can be released. // On-Demand billing is stopped and the host goes into released state. The host // ID of Dedicated Hosts that have been released can no longer be specified -// in another request, e.g., ModifyHosts. You must stop or terminate all instances -// on a host before it can be released. +// in another request, for example, ModifyHosts. You must stop or terminate +// all instances on a host before it can be released. // -// When Dedicated Hosts are released, it make take some time for them to stop +// When Dedicated Hosts are released, it may take some time for them to stop // counting toward your limit and you may receive capacity errors when trying -// to allocate new Dedicated hosts. Try waiting a few minutes, and then try -// again. +// to allocate new Dedicated Hosts. Wait a few minutes and then try again. // -// Released hosts will still appear in a DescribeHosts response. +// Released hosts still appear in a DescribeHosts response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20066,7 +20750,7 @@ const opReplaceIamInstanceProfileAssociation = "ReplaceIamInstanceProfileAssocia // ReplaceIamInstanceProfileAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceIamInstanceProfileAssociation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20145,7 +20829,7 @@ const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" // ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceNetworkAclAssociation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20187,7 +20871,7 @@ func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssoci // // Changes which network ACL a subnet is associated with. By default when you // create a subnet, it's automatically associated with the default network ACL. -// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// For more information, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // This is an idempotent operation. @@ -20224,7 +20908,7 @@ const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" // ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the ReplaceNetworkAclEntry operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20266,8 +20950,8 @@ func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) // ReplaceNetworkAclEntry API operation for Amazon Elastic Compute Cloud. // -// Replaces an entry (rule) in a network ACL. For more information about network -// ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// Replaces an entry (rule) in a network ACL. For more information, see Network +// ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -20302,7 +20986,7 @@ const opReplaceRoute = "ReplaceRoute" // ReplaceRouteRequest generates a "aws/request.Request" representing the // client's request for the ReplaceRoute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20345,11 +21029,11 @@ func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Reques // ReplaceRoute API operation for Amazon Elastic Compute Cloud. // // Replaces an existing route within a route table in a VPC. You must provide -// only one of the following: Internet gateway or virtual private gateway, NAT +// only one of the following: internet gateway or virtual private gateway, NAT // instance, NAT gateway, VPC peering connection, network interface, or egress-only -// Internet gateway. +// internet gateway. // -// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// For more information, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -20384,7 +21068,7 @@ const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" // ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceRouteTableAssociation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20466,7 +21150,7 @@ const opReportInstanceStatus = "ReportInstanceStatus" // ReportInstanceStatusRequest generates a "aws/request.Request" representing the // client's request for the ReportInstanceStatus operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -20548,7 +21232,7 @@ const opRequestSpotFleet = "RequestSpotFleet" // RequestSpotFleetRequest generates a "aws/request.Request" representing the // client's request for the RequestSpotFleet operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20590,6 +21274,10 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // // Creates a Spot Fleet request. // +// The Spot Fleet request specifies the total target capacity and the On-Demand +// target capacity. Amazon EC2 calculates the difference between the total capacity +// and On-Demand capacity, and launches the difference as Spot capacity. +// // You can submit a single request that includes multiple launch specifications // that vary by instance type, AMI, Availability Zone, or subnet. // @@ -20604,10 +21292,11 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // pools, you can improve the availability of your fleet. // // You can specify tags for the Spot Instances. You cannot tag other resource -// types in a Spot Fleet request; only the instance resource type is supported. +// types in a Spot Fleet request because only the instance resource type is +// supported. // // For more information, see Spot Fleet Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20641,7 +21330,7 @@ const opRequestSpotInstances = "RequestSpotInstances" // RequestSpotInstancesRequest generates a "aws/request.Request" representing the // client's request for the RequestSpotInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20681,10 +21370,10 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // RequestSpotInstances API operation for Amazon Elastic Compute Cloud. // -// Creates a Spot Instance request. Spot Instances are instances that Amazon -// EC2 launches when the maximum price that you specify exceeds the current -// Spot price. For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) -// in the Amazon Elastic Compute Cloud User Guide. +// Creates a Spot Instance request. +// +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20718,7 +21407,7 @@ const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" // ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetFpgaImageAttribute operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20793,7 +21482,7 @@ const opResetImageAttribute = "ResetImageAttribute" // ResetImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetImageAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20871,7 +21560,7 @@ const opResetInstanceAttribute = "ResetInstanceAttribute" // ResetInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetInstanceAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -20955,7 +21644,7 @@ const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" // ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetNetworkInterfaceAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21032,7 +21721,7 @@ const opResetSnapshotAttribute = "ResetSnapshotAttribute" // ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetSnapshotAttribute operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21076,7 +21765,7 @@ func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) // // Resets permission settings for the specified snapshot. // -// For more information on modifying snapshot permissions, see Sharing Snapshots +// For more information about modifying snapshot permissions, see Sharing Snapshots // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -21112,7 +21801,7 @@ const opRestoreAddressToClassic = "RestoreAddressToClassic" // RestoreAddressToClassicRequest generates a "aws/request.Request" representing the // client's request for the RestoreAddressToClassic operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -21189,7 +21878,7 @@ const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" // RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the // client's request for the RevokeSecurityGroupEgress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21277,7 +21966,7 @@ const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" // RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the // client's request for the RevokeSecurityGroupIngress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21368,7 +22057,7 @@ const opRunInstances = "RunInstances" // RunInstancesRequest generates a "aws/request.Request" representing the // client's request for the RunInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21495,7 +22184,7 @@ const opRunScheduledInstances = "RunScheduledInstances" // RunScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the RunScheduledInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21579,7 +22268,7 @@ const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the // client's request for the StartInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21675,7 +22364,7 @@ const opStopInstances = "StopInstances" // StopInstancesRequest generates a "aws/request.Request" representing the // client's request for the StopInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21781,7 +22470,7 @@ const opTerminateInstances = "TerminateInstances" // TerminateInstancesRequest generates a "aws/request.Request" representing the // client's request for the TerminateInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -21879,7 +22568,7 @@ const opUnassignIpv6Addresses = "UnassignIpv6Addresses" // UnassignIpv6AddressesRequest generates a "aws/request.Request" representing the // client's request for the UnassignIpv6Addresses operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -21953,7 +22642,7 @@ const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" // UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the // client's request for the UnassignPrivateIpAddresses operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -22029,7 +22718,7 @@ const opUnmonitorInstances = "UnmonitorInstances" // UnmonitorInstancesRequest generates a "aws/request.Request" representing the // client's request for the UnmonitorInstances operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -22105,7 +22794,7 @@ const opUpdateSecurityGroupRuleDescriptionsEgress = "UpdateSecurityGroupRuleDesc // UpdateSecurityGroupRuleDescriptionsEgressRequest generates a "aws/request.Request" representing the // client's request for the UpdateSecurityGroupRuleDescriptionsEgress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -22185,7 +22874,7 @@ const opUpdateSecurityGroupRuleDescriptionsIngress = "UpdateSecurityGroupRuleDes // UpdateSecurityGroupRuleDescriptionsIngressRequest generates a "aws/request.Request" representing the // client's request for the UpdateSecurityGroupRuleDescriptionsIngress operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -22262,7 +22951,6 @@ func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngressWithContext(ctx aws.Cont } // Contains the parameters for accepting the quote. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptReservedInstancesExchangeQuoteRequest type AcceptReservedInstancesExchangeQuoteInput struct { _ struct{} `type:"structure"` @@ -22335,7 +23023,6 @@ func (s *AcceptReservedInstancesExchangeQuoteInput) SetTargetConfigurations(v [] } // The result of the exchange and whether it was successful. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptReservedInstancesExchangeQuoteResult type AcceptReservedInstancesExchangeQuoteOutput struct { _ struct{} `type:"structure"` @@ -22359,7 +23046,6 @@ func (s *AcceptReservedInstancesExchangeQuoteOutput) SetExchangeId(v string) *Ac return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcEndpointConnectionsRequest type AcceptVpcEndpointConnectionsInput struct { _ struct{} `type:"structure"` @@ -22424,7 +23110,6 @@ func (s *AcceptVpcEndpointConnectionsInput) SetVpcEndpointIds(v []*string) *Acce return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcEndpointConnectionsResult type AcceptVpcEndpointConnectionsOutput struct { _ struct{} `type:"structure"` @@ -22449,7 +23134,6 @@ func (s *AcceptVpcEndpointConnectionsOutput) SetUnsuccessful(v []*UnsuccessfulIt } // Contains the parameters for AcceptVpcPeeringConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcPeeringConnectionRequest type AcceptVpcPeeringConnectionInput struct { _ struct{} `type:"structure"` @@ -22487,7 +23171,6 @@ func (s *AcceptVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *A } // Contains the output of AcceptVpcPeeringConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcPeeringConnectionResult type AcceptVpcPeeringConnectionOutput struct { _ struct{} `type:"structure"` @@ -22512,7 +23195,6 @@ func (s *AcceptVpcPeeringConnectionOutput) SetVpcPeeringConnection(v *VpcPeering } // Describes an account attribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AccountAttribute type AccountAttribute struct { _ struct{} `type:"structure"` @@ -22546,7 +23228,6 @@ func (s *AccountAttribute) SetAttributeValues(v []*AccountAttributeValue) *Accou } // Describes a value of an account attribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AccountAttributeValue type AccountAttributeValue struct { _ struct{} `type:"structure"` @@ -22571,7 +23252,6 @@ func (s *AccountAttributeValue) SetAttributeValue(v string) *AccountAttributeVal } // Describes a running instance in a Spot Fleet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ActiveInstance type ActiveInstance struct { _ struct{} `type:"structure"` @@ -22625,7 +23305,6 @@ func (s *ActiveInstance) SetSpotInstanceRequestId(v string) *ActiveInstance { } // Describes an Elastic IP address. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Address type Address struct { _ struct{} `type:"structure"` @@ -22724,7 +23403,6 @@ func (s *Address) SetTags(v []*Tag) *Address { } // Contains the parameters for AllocateAddress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddressRequest type AllocateAddressInput struct { _ struct{} `type:"structure"` @@ -22772,7 +23450,6 @@ func (s *AllocateAddressInput) SetDryRun(v bool) *AllocateAddressInput { } // Contains the output of AllocateAddress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddressResult type AllocateAddressOutput struct { _ struct{} `type:"structure"` @@ -22817,7 +23494,6 @@ func (s *AllocateAddressOutput) SetPublicIp(v string) *AllocateAddressOutput { } // Contains the parameters for AllocateHosts. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateHostsRequest type AllocateHostsInput struct { _ struct{} `type:"structure"` @@ -22833,20 +23509,19 @@ type AllocateHostsInput struct { // AvailabilityZone is a required field AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // Unique, case-sensitive identifier you provide to ensure idempotency of the - // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) // in the Amazon Elastic Compute Cloud User Guide. ClientToken *string `locationName:"clientToken" type:"string"` - // Specify the instance type that you want your Dedicated Hosts to be configured - // for. When you specify the instance type, that is the only instance type that - // you can launch onto that host. + // Specify the instance type for which to configure your Dedicated Hosts. When + // you specify the instance type, that is the only instance type that you can + // launch onto that host. // // InstanceType is a required field InstanceType *string `locationName:"instanceType" type:"string" required:"true"` - // The number of Dedicated Hosts you want to allocate to your account with these - // parameters. + // The number of Dedicated Hosts to allocate to your account with these parameters. // // Quantity is a required field Quantity *int64 `locationName:"quantity" type:"integer" required:"true"` @@ -22912,12 +23587,11 @@ func (s *AllocateHostsInput) SetQuantity(v int64) *AllocateHostsInput { } // Contains the output of AllocateHosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateHostsResult type AllocateHostsOutput struct { _ struct{} `type:"structure"` - // The ID of the allocated Dedicated Host. This is used when you want to launch - // an instance onto a specific host. + // The ID of the allocated Dedicated Host. This is used to launch an instance + // onto a specific host. HostIds []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` } @@ -22938,7 +23612,6 @@ func (s *AllocateHostsOutput) SetHostIds(v []*string) *AllocateHostsOutput { } // Describes a principal. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllowedPrincipal type AllowedPrincipal struct { _ struct{} `type:"structure"` @@ -22971,7 +23644,6 @@ func (s *AllowedPrincipal) SetPrincipalType(v string) *AllowedPrincipal { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignIpv6AddressesRequest type AssignIpv6AddressesInput struct { _ struct{} `type:"structure"` @@ -23031,7 +23703,6 @@ func (s *AssignIpv6AddressesInput) SetNetworkInterfaceId(v string) *AssignIpv6Ad return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignIpv6AddressesResult type AssignIpv6AddressesOutput struct { _ struct{} `type:"structure"` @@ -23065,7 +23736,6 @@ func (s *AssignIpv6AddressesOutput) SetNetworkInterfaceId(v string) *AssignIpv6A } // Contains the parameters for AssignPrivateIpAddresses. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateIpAddressesRequest type AssignPrivateIpAddressesInput struct { _ struct{} `type:"structure"` @@ -23138,7 +23808,6 @@ func (s *AssignPrivateIpAddressesInput) SetSecondaryPrivateIpAddressCount(v int6 return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateIpAddressesOutput type AssignPrivateIpAddressesOutput struct { _ struct{} `type:"structure"` } @@ -23154,7 +23823,6 @@ func (s AssignPrivateIpAddressesOutput) GoString() string { } // Contains the parameters for AssociateAddress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateAddressRequest type AssociateAddressInput struct { _ struct{} `type:"structure"` @@ -23247,7 +23915,6 @@ func (s *AssociateAddressInput) SetPublicIp(v string) *AssociateAddressInput { } // Contains the output of AssociateAddress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateAddressResult type AssociateAddressOutput struct { _ struct{} `type:"structure"` @@ -23273,7 +23940,6 @@ func (s *AssociateAddressOutput) SetAssociationId(v string) *AssociateAddressOut } // Contains the parameters for AssociateDhcpOptions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateDhcpOptionsRequest type AssociateDhcpOptionsInput struct { _ struct{} `type:"structure"` @@ -23339,7 +24005,6 @@ func (s *AssociateDhcpOptionsInput) SetVpcId(v string) *AssociateDhcpOptionsInpu return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateDhcpOptionsOutput type AssociateDhcpOptionsOutput struct { _ struct{} `type:"structure"` } @@ -23354,7 +24019,6 @@ func (s AssociateDhcpOptionsOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfileRequest type AssociateIamInstanceProfileInput struct { _ struct{} `type:"structure"` @@ -23407,7 +24071,6 @@ func (s *AssociateIamInstanceProfileInput) SetInstanceId(v string) *AssociateIam return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfileResult type AssociateIamInstanceProfileOutput struct { _ struct{} `type:"structure"` @@ -23432,7 +24095,6 @@ func (s *AssociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(v * } // Contains the parameters for AssociateRouteTable. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateRouteTableRequest type AssociateRouteTableInput struct { _ struct{} `type:"structure"` @@ -23498,11 +24160,11 @@ func (s *AssociateRouteTableInput) SetSubnetId(v string) *AssociateRouteTableInp } // Contains the output of AssociateRouteTable. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateRouteTableResult type AssociateRouteTableOutput struct { _ struct{} `type:"structure"` - // The route table association ID (needed to disassociate the route table). + // The route table association ID. This ID is required for disassociating the + // route table. 
AssociationId *string `locationName:"associationId" type:"string"` } @@ -23522,7 +24184,6 @@ func (s *AssociateRouteTableOutput) SetAssociationId(v string) *AssociateRouteTa return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateSubnetCidrBlockRequest type AssociateSubnetCidrBlockInput struct { _ struct{} `type:"structure"` @@ -23575,7 +24236,6 @@ func (s *AssociateSubnetCidrBlockInput) SetSubnetId(v string) *AssociateSubnetCi return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateSubnetCidrBlockResult type AssociateSubnetCidrBlockOutput struct { _ struct{} `type:"structure"` @@ -23608,7 +24268,6 @@ func (s *AssociateSubnetCidrBlockOutput) SetSubnetId(v string) *AssociateSubnetC return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateVpcCidrBlockRequest type AssociateVpcCidrBlockInput struct { _ struct{} `type:"structure"` @@ -23667,7 +24326,6 @@ func (s *AssociateVpcCidrBlockInput) SetVpcId(v string) *AssociateVpcCidrBlockIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateVpcCidrBlockResult type AssociateVpcCidrBlockOutput struct { _ struct{} `type:"structure"` @@ -23710,7 +24368,6 @@ func (s *AssociateVpcCidrBlockOutput) SetVpcId(v string) *AssociateVpcCidrBlockO } // Contains the parameters for AttachClassicLinkVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachClassicLinkVpcRequest type AttachClassicLinkVpcInput struct { _ struct{} `type:"structure"` @@ -23791,7 +24448,6 @@ func (s *AttachClassicLinkVpcInput) SetVpcId(v string) *AttachClassicLinkVpcInpu } // Contains the output of AttachClassicLinkVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachClassicLinkVpcResult type AttachClassicLinkVpcOutput struct { _ struct{} `type:"structure"` @@ -23816,7 +24472,6 @@ func (s *AttachClassicLinkVpcOutput) SetReturn(v bool) *AttachClassicLinkVpcOutp } // Contains the parameters for AttachInternetGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachInternetGatewayRequest type AttachInternetGatewayInput struct { _ struct{} `type:"structure"` @@ -23826,7 +24481,7 @@ type AttachInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The ID of the Internet gateway. + // The ID of the internet gateway. // // InternetGatewayId is a required field InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` @@ -23881,7 +24536,6 @@ func (s *AttachInternetGatewayInput) SetVpcId(v string) *AttachInternetGatewayIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachInternetGatewayOutput type AttachInternetGatewayOutput struct { _ struct{} `type:"structure"` } @@ -23897,7 +24551,6 @@ func (s AttachInternetGatewayOutput) GoString() string { } // Contains the parameters for AttachNetworkInterface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachNetworkInterfaceRequest type AttachNetworkInterfaceInput struct { _ struct{} `type:"structure"` @@ -23977,7 +24630,6 @@ func (s *AttachNetworkInterfaceInput) SetNetworkInterfaceId(v string) *AttachNet } // Contains the output of AttachNetworkInterface. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachNetworkInterfaceResult type AttachNetworkInterfaceOutput struct { _ struct{} `type:"structure"` @@ -24002,7 +24654,6 @@ func (s *AttachNetworkInterfaceOutput) SetAttachmentId(v string) *AttachNetworkI } // Contains the parameters for AttachVolume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVolumeRequest type AttachVolumeInput struct { _ struct{} `type:"structure"` @@ -24083,7 +24734,6 @@ func (s *AttachVolumeInput) SetVolumeId(v string) *AttachVolumeInput { } // Contains the parameters for AttachVpnGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVpnGatewayRequest type AttachVpnGatewayInput struct { _ struct{} `type:"structure"` @@ -24149,7 +24799,6 @@ func (s *AttachVpnGatewayInput) SetVpnGatewayId(v string) *AttachVpnGatewayInput } // Contains the output of AttachVpnGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVpnGatewayResult type AttachVpnGatewayOutput struct { _ struct{} `type:"structure"` @@ -24174,7 +24823,6 @@ func (s *AttachVpnGatewayOutput) SetVpcAttachment(v *VpcAttachment) *AttachVpnGa } // Describes a value for a resource attribute that is a Boolean value. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttributeBooleanValue type AttributeBooleanValue struct { _ struct{} `type:"structure"` @@ -24199,11 +24847,10 @@ func (s *AttributeBooleanValue) SetValue(v bool) *AttributeBooleanValue { } // Describes a value for a resource attribute that is a String. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttributeValue type AttributeValue struct { _ struct{} `type:"structure"` - // The attribute value. Note that the value is case-sensitive. + // The attribute value. The value is case-sensitive. Value *string `locationName:"value" type:"string"` } @@ -24224,7 +24871,6 @@ func (s *AttributeValue) SetValue(v string) *AttributeValue { } // Contains the parameters for AuthorizeSecurityGroupEgress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupEgressRequest type AuthorizeSecurityGroupEgressInput struct { _ struct{} `type:"structure"` @@ -24342,7 +24988,6 @@ func (s *AuthorizeSecurityGroupEgressInput) SetToPort(v int64) *AuthorizeSecurit return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupEgressOutput type AuthorizeSecurityGroupEgressOutput struct { _ struct{} `type:"structure"` } @@ -24358,7 +25003,6 @@ func (s AuthorizeSecurityGroupEgressOutput) GoString() string { } // Contains the parameters for AuthorizeSecurityGroupIngress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupIngressRequest type AuthorizeSecurityGroupIngressInput struct { _ struct{} `type:"structure"` @@ -24491,7 +25135,6 @@ func (s *AuthorizeSecurityGroupIngressInput) SetToPort(v int64) *AuthorizeSecuri return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupIngressOutput type AuthorizeSecurityGroupIngressOutput struct { _ struct{} `type:"structure"` } @@ -24507,7 +25150,6 @@ func (s AuthorizeSecurityGroupIngressOutput) GoString() string { } // Describes an Availability Zone. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AvailabilityZone type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -24559,7 +25201,6 @@ func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { } // Describes a message about an Availability Zone. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AvailabilityZoneMessage type AvailabilityZoneMessage struct { _ struct{} `type:"structure"` @@ -24584,11 +25225,10 @@ func (s *AvailabilityZoneMessage) SetMessage(v string) *AvailabilityZoneMessage } // The capacity information for instances launched onto the Dedicated Host. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AvailableCapacity type AvailableCapacity struct { _ struct{} `type:"structure"` - // The total number of instances that the Dedicated Host supports. + // The total number of instances supported by the Dedicated Host. AvailableInstanceCapacity []*InstanceCapacity `locationName:"availableInstanceCapacity" locationNameList:"item" type:"list"` // The number of vCPUs available on the Dedicated Host. @@ -24617,7 +25257,6 @@ func (s *AvailableCapacity) SetAvailableVCpus(v int64) *AvailableCapacity { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BlobAttributeValue type BlobAttributeValue struct { _ struct{} `type:"structure"` @@ -24642,7 +25281,6 @@ func (s *BlobAttributeValue) SetValue(v []byte) *BlobAttributeValue { } // Describes a block device mapping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BlockDeviceMapping type BlockDeviceMapping struct { _ struct{} `type:"structure"` @@ -24659,10 +25297,13 @@ type BlockDeviceMapping struct { // The virtual device name (ephemeralN). Instance store volumes are numbered // starting from 0. An instance type with 2 available instance store volumes - // can specify mappings for ephemeral0 and ephemeral1.The number of available + // can specify mappings for ephemeral0 and ephemeral1. The number of available // instance store volumes depends on the instance type. After you connect to // the instance, you must mount the volume. // + // NVMe instance store volumes are automatically enumerated and assigned a device + // name. Including them in your block device mapping has no effect. + // // Constraints: For M3 instances, you must specify instance store volumes in // the block device mapping for the instance. When you launch an M3 instance, // we ignore any instance store volumes specified in the block device mapping @@ -24705,7 +25346,6 @@ func (s *BlockDeviceMapping) SetVirtualName(v string) *BlockDeviceMapping { } // Contains the parameters for BundleInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleInstanceRequest type BundleInstanceInput struct { _ struct{} `type:"structure"` @@ -24779,7 +25419,6 @@ func (s *BundleInstanceInput) SetStorage(v *Storage) *BundleInstanceInput { } // Contains the output of BundleInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleInstanceResult type BundleInstanceOutput struct { _ struct{} `type:"structure"` @@ -24804,7 +25443,6 @@ func (s *BundleInstanceOutput) SetBundleTask(v *BundleTask) *BundleInstanceOutpu } // Describes a bundle task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleTask type BundleTask struct { _ struct{} `type:"structure"` @@ -24821,7 +25459,7 @@ type BundleTask struct { Progress *string `locationName:"progress" type:"string"` // The time this task started. 
- StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + StartTime *time.Time `locationName:"startTime" type:"timestamp"` // The state of the task. State *string `locationName:"state" type:"string" enum:"BundleTaskState"` @@ -24830,7 +25468,7 @@ type BundleTask struct { Storage *Storage `locationName:"storage" type:"structure"` // The time of the most recent update for the task. - UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp"` } // String returns the string representation @@ -24892,7 +25530,6 @@ func (s *BundleTask) SetUpdateTime(v time.Time) *BundleTask { } // Describes an error for BundleInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleTaskError type BundleTaskError struct { _ struct{} `type:"structure"` @@ -24926,7 +25563,6 @@ func (s *BundleTaskError) SetMessage(v string) *BundleTaskError { } // Contains the parameters for CancelBundleTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelBundleTaskRequest type CancelBundleTaskInput struct { _ struct{} `type:"structure"` @@ -24978,7 +25614,6 @@ func (s *CancelBundleTaskInput) SetDryRun(v bool) *CancelBundleTaskInput { } // Contains the output of CancelBundleTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelBundleTaskResult type CancelBundleTaskOutput struct { _ struct{} `type:"structure"` @@ -25003,7 +25638,6 @@ func (s *CancelBundleTaskOutput) SetBundleTask(v *BundleTask) *CancelBundleTaskO } // Contains the parameters for CancelConversionTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelConversionRequest type CancelConversionTaskInput struct { _ struct{} `type:"structure"` @@ -25063,7 +25697,6 @@ func (s *CancelConversionTaskInput) SetReasonMessage(v string) *CancelConversion return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelConversionTaskOutput type CancelConversionTaskOutput struct { _ struct{} `type:"structure"` } @@ -25079,7 +25712,6 @@ func (s CancelConversionTaskOutput) GoString() string { } // Contains the parameters for CancelExportTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelExportTaskRequest type CancelExportTaskInput struct { _ struct{} `type:"structure"` @@ -25118,7 +25750,6 @@ func (s *CancelExportTaskInput) SetExportTaskId(v string) *CancelExportTaskInput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelExportTaskOutput type CancelExportTaskOutput struct { _ struct{} `type:"structure"` } @@ -25134,7 +25765,6 @@ func (s CancelExportTaskOutput) GoString() string { } // Contains the parameters for CancelImportTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImportTaskRequest type CancelImportTaskInput struct { _ struct{} `type:"structure"` @@ -25180,7 +25810,6 @@ func (s *CancelImportTaskInput) SetImportTaskId(v string) *CancelImportTaskInput } // Contains the output for CancelImportTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImportTaskResult type CancelImportTaskOutput struct { _ struct{} `type:"structure"` @@ -25223,7 +25852,6 @@ func (s *CancelImportTaskOutput) SetState(v string) *CancelImportTaskOutput { } // Contains the parameters for CancelReservedInstancesListing. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelReservedInstancesListingRequest type CancelReservedInstancesListingInput struct { _ struct{} `type:"structure"` @@ -25263,7 +25891,6 @@ func (s *CancelReservedInstancesListingInput) SetReservedInstancesListingId(v st } // Contains the output of CancelReservedInstancesListing. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelReservedInstancesListingResult type CancelReservedInstancesListingOutput struct { _ struct{} `type:"structure"` @@ -25288,7 +25915,6 @@ func (s *CancelReservedInstancesListingOutput) SetReservedInstancesListings(v [] } // Describes a Spot Fleet error. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequestsError type CancelSpotFleetRequestsError struct { _ struct{} `type:"structure"` @@ -25326,7 +25952,6 @@ func (s *CancelSpotFleetRequestsError) SetMessage(v string) *CancelSpotFleetRequ } // Describes a Spot Fleet request that was not successfully canceled. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequestsErrorItem type CancelSpotFleetRequestsErrorItem struct { _ struct{} `type:"structure"` @@ -25364,7 +25989,6 @@ func (s *CancelSpotFleetRequestsErrorItem) SetSpotFleetRequestId(v string) *Canc } // Contains the parameters for CancelSpotFleetRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequestsRequest type CancelSpotFleetRequestsInput struct { _ struct{} `type:"structure"` @@ -25431,7 +26055,6 @@ func (s *CancelSpotFleetRequestsInput) SetTerminateInstances(v bool) *CancelSpot } // Contains the output of CancelSpotFleetRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequestsResponse type CancelSpotFleetRequestsOutput struct { _ struct{} `type:"structure"` @@ -25465,7 +26088,6 @@ func (s *CancelSpotFleetRequestsOutput) SetUnsuccessfulFleetRequests(v []*Cancel } // Describes a Spot Fleet request that was successfully canceled. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequestsSuccessItem type CancelSpotFleetRequestsSuccessItem struct { _ struct{} `type:"structure"` @@ -25514,7 +26136,6 @@ func (s *CancelSpotFleetRequestsSuccessItem) SetSpotFleetRequestId(v string) *Ca } // Contains the parameters for CancelSpotInstanceRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotInstanceRequestsRequest type CancelSpotInstanceRequestsInput struct { _ struct{} `type:"structure"` @@ -25566,7 +26187,6 @@ func (s *CancelSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*string) } // Contains the output of CancelSpotInstanceRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotInstanceRequestsResult type CancelSpotInstanceRequestsOutput struct { _ struct{} `type:"structure"` @@ -25591,7 +26211,6 @@ func (s *CancelSpotInstanceRequestsOutput) SetCancelledSpotInstanceRequests(v [] } // Describes a request to cancel a Spot Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelledSpotInstanceRequest type CancelledSpotInstanceRequest struct { _ struct{} `type:"structure"` @@ -25625,7 +26244,6 @@ func (s *CancelledSpotInstanceRequest) SetState(v string) *CancelledSpotInstance } // Describes an IPv4 CIDR block. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CidrBlock type CidrBlock struct { _ struct{} `type:"structure"` @@ -25650,7 +26268,6 @@ func (s *CidrBlock) SetCidrBlock(v string) *CidrBlock { } // Describes the ClassicLink DNS support status of a VPC. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ClassicLinkDnsSupport type ClassicLinkDnsSupport struct { _ struct{} `type:"structure"` @@ -25684,7 +26301,6 @@ func (s *ClassicLinkDnsSupport) SetVpcId(v string) *ClassicLinkDnsSupport { } // Describes a linked EC2-Classic instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ClassicLinkInstance type ClassicLinkInstance struct { _ struct{} `type:"structure"` @@ -25736,7 +26352,6 @@ func (s *ClassicLinkInstance) SetVpcId(v string) *ClassicLinkInstance { } // Describes a Classic Load Balancer. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ClassicLoadBalancer type ClassicLoadBalancer struct { _ struct{} `type:"structure"` @@ -25777,7 +26392,6 @@ func (s *ClassicLoadBalancer) SetName(v string) *ClassicLoadBalancer { // Describes the Classic Load Balancers to attach to a Spot Fleet. Spot Fleet // registers the running Spot Instances with these Classic Load Balancers. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ClassicLoadBalancersConfig type ClassicLoadBalancersConfig struct { _ struct{} `type:"structure"` @@ -25830,7 +26444,6 @@ func (s *ClassicLoadBalancersConfig) SetClassicLoadBalancers(v []*ClassicLoadBal } // Describes the client-specific data. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ClientData type ClientData struct { _ struct{} `type:"structure"` @@ -25838,13 +26451,13 @@ type ClientData struct { Comment *string `type:"string"` // The time that the disk upload ends. - UploadEnd *time.Time `type:"timestamp" timestampFormat:"iso8601"` + UploadEnd *time.Time `type:"timestamp"` // The size of the uploaded disk image, in GiB. UploadSize *float64 `type:"double"` // The time that the disk upload starts. - UploadStart *time.Time `type:"timestamp" timestampFormat:"iso8601"` + UploadStart *time.Time `type:"timestamp"` } // String returns the string representation @@ -25882,7 +26495,6 @@ func (s *ClientData) SetUploadStart(v time.Time) *ClientData { } // Contains the parameters for ConfirmProductInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConfirmProductInstanceRequest type ConfirmProductInstanceInput struct { _ struct{} `type:"structure"` @@ -25948,7 +26560,6 @@ func (s *ConfirmProductInstanceInput) SetProductCode(v string) *ConfirmProductIn } // Contains the output of ConfirmProductInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConfirmProductInstanceResult type ConfirmProductInstanceOutput struct { _ struct{} `type:"structure"` @@ -25984,7 +26595,6 @@ func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstance } // Describes a connection notification for a VPC endpoint or VPC endpoint service. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConnectionNotification type ConnectionNotification struct { _ struct{} `type:"structure"` @@ -26064,14 +26674,11 @@ func (s *ConnectionNotification) SetVpcEndpointId(v string) *ConnectionNotificat } // Describes a conversion task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConversionTask type ConversionTask struct { _ struct{} `type:"structure"` // The ID of the conversion task. 
- // - // ConversionTaskId is a required field - ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + ConversionTaskId *string `locationName:"conversionTaskId" type:"string"` // The time when the task expires. If the upload isn't complete before the expiration // time, we automatically cancel the task. @@ -26086,9 +26693,7 @@ type ConversionTask struct { ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"` // The state of the conversion task. - // - // State is a required field - State *string `locationName:"state" type:"string" required:"true" enum:"ConversionTaskState"` + State *string `locationName:"state" type:"string" enum:"ConversionTaskState"` // The status message related to the conversion task. StatusMessage *string `locationName:"statusMessage" type:"string"` @@ -26149,7 +26754,6 @@ func (s *ConversionTask) SetTags(v []*Tag) *ConversionTask { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageRequest type CopyFpgaImageInput struct { _ struct{} `type:"structure"` @@ -26242,7 +26846,6 @@ func (s *CopyFpgaImageInput) SetSourceRegion(v string) *CopyFpgaImageInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageResult type CopyFpgaImageOutput struct { _ struct{} `type:"structure"` @@ -26267,7 +26870,6 @@ func (s *CopyFpgaImageOutput) SetFpgaImageId(v string) *CopyFpgaImageOutput { } // Contains the parameters for CopyImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImageRequest type CopyImageInput struct { _ struct{} `type:"structure"` @@ -26292,14 +26894,34 @@ type CopyImageInput struct { // in the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when - // encrypting the snapshots of an image during a copy operation. This parameter - // is only required if you want to use a non-default CMK; if this parameter - // is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms - // namespace, followed by the region of the CMK, the AWS account ID of the CMK - // owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // An identifier for the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use when creating the encrypted volume. This parameter is only + // required if you want to use a non-default CMK; if this parameter is not specified, + // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted + // flag must also be set. + // + // The CMK identifier may be provided in any of the following formats: + // + // * Key ID + // + // * Key alias, in the form alias/ExampleAlias + // + // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed + // by the region of the CMK, the AWS account ID of the CMK owner, the key + // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // + // + // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, + // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. 
+ // + // + // AWS parses KmsKeyId asynchronously, meaning that the action you call may + // appear to complete even though you provided an invalid identifier. This action + // will eventually report failure. + // // The specified CMK must exist in the region that the snapshot is being copied - // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + // to. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // The name of the new AMI in the destination region. @@ -26396,7 +27018,6 @@ func (s *CopyImageInput) SetSourceRegion(v string) *CopyImageInput { } // Contains the output of CopyImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImageResult type CopyImageOutput struct { _ struct{} `type:"structure"` @@ -26421,7 +27042,6 @@ func (s *CopyImageOutput) SetImageId(v string) *CopyImageOutput { } // Contains the parameters for CopySnapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopySnapshotRequest type CopySnapshotInput struct { _ struct{} `type:"structure"` @@ -26432,10 +27052,10 @@ type CopySnapshotInput struct { // copy operation. This parameter is only valid for specifying the destination // region in a PresignedUrl parameter, where it is required. // - // CopySnapshot sends the snapshot copy to the regional endpoint that you send - // the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, - // this is specified with the --region parameter or the default region in your - // AWS configuration file). + // The snapshot copy is sent to the regional endpoint that you sent the HTTP + // request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI, + // this is specified using the --region parameter or the default region in your + // AWS configuration file. DestinationRegion *string `locationName:"destinationRegion" type:"string"` // Checks whether you have the required permissions for the action, without @@ -26453,25 +27073,43 @@ type CopySnapshotInput struct { // the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when - // creating the snapshot copy. This parameter is only required if you want to - // use a non-default CMK; if this parameter is not specified, the default CMK - // for EBS is used. The ARN contains the arn:aws:kms namespace, followed by - // the region of the CMK, the AWS account ID of the CMK owner, the key namespace, - // and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. - // The specified CMK must exist in the region that the snapshot is being copied - // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + // An identifier for the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use when creating the encrypted volume. This parameter is only + // required if you want to use a non-default CMK; if this parameter is not specified, + // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted + // flag must also be set. + // + // The CMK identifier may be provided in any of the following formats: + // + // * Key ID + // + // * Key alias + // + // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed + // by the region of the CMK, the AWS account ID of the CMK owner, the key + // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. 
+ // + // + // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, + // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // + // AWS parses KmsKeyId asynchronously, meaning that the action you call may + // appear to complete even though you provided an invalid identifier. The action + // will eventually fail. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` - // The pre-signed URL that facilitates copying an encrypted snapshot. This parameter - // is only required when copying an encrypted snapshot with the Amazon EC2 Query - // API; it is available as an optional parameter in all other cases. The PresignedUrl - // should use the snapshot source endpoint, the CopySnapshot action, and include - // the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The - // PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots - // are stored in Amazon S3, the signing algorithm for this parameter uses the - // same logic that is described in Authenticating Requests by Using Query Parameters - // (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // When you copy an encrypted source snapshot using the Amazon EC2 Query API, + // you must supply a pre-signed URL. This parameter is optional for unencrypted + // snapshots. For more information, see Query Requests (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html). + // + // The PresignedUrl should use the snapshot source endpoint, the CopySnapshot + // action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion + // parameters. The PresignedUrl must be signed using AWS Signature Version 4. + // Because EBS snapshots are stored in Amazon S3, the signing algorithm for + // this parameter uses the same logic that is described in Authenticating Requests + // by Using Query Parameters (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) // in the Amazon Simple Storage Service API Reference. An invalid or improperly // signed PresignedUrl will cause the copy operation to fail asynchronously, // and the snapshot will move to an error state. @@ -26563,7 +27201,6 @@ func (s *CopySnapshotInput) SetSourceSnapshotId(v string) *CopySnapshotInput { } // Contains the output of CopySnapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopySnapshotResult type CopySnapshotOutput struct { _ struct{} `type:"structure"` @@ -26587,8 +27224,76 @@ func (s *CopySnapshotOutput) SetSnapshotId(v string) *CopySnapshotOutput { return s } +// The CPU options for the instance. +type CpuOptions struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `locationName:"coreCount" type:"integer"` + + // The number of threads per CPU core. + ThreadsPerCore *int64 `locationName:"threadsPerCore" type:"integer"` +} + +// String returns the string representation +func (s CpuOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CpuOptions) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *CpuOptions) SetCoreCount(v int64) *CpuOptions { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. 
+func (s *CpuOptions) SetThreadsPerCore(v int64) *CpuOptions { + s.ThreadsPerCore = &v + return s +} + +// The CPU options for the instance. Both the core count and threads per core +// must be specified in the request. +type CpuOptionsRequest struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `type:"integer"` + + // The number of threads per CPU core. To disable Intel Hyper-Threading Technology + // for the instance, specify a value of 1. Otherwise, specify the default value + // of 2. + ThreadsPerCore *int64 `type:"integer"` +} + +// String returns the string representation +func (s CpuOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CpuOptionsRequest) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *CpuOptionsRequest) SetCoreCount(v int64) *CpuOptionsRequest { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *CpuOptionsRequest) SetThreadsPerCore(v int64) *CpuOptionsRequest { + s.ThreadsPerCore = &v + return s +} + // Contains the parameters for CreateCustomerGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCustomerGatewayRequest type CreateCustomerGatewayInput struct { _ struct{} `type:"structure"` @@ -26671,7 +27376,6 @@ func (s *CreateCustomerGatewayInput) SetType(v string) *CreateCustomerGatewayInp } // Contains the output of CreateCustomerGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCustomerGatewayResult type CreateCustomerGatewayOutput struct { _ struct{} `type:"structure"` @@ -26695,7 +27399,6 @@ func (s *CreateCustomerGatewayOutput) SetCustomerGateway(v *CustomerGateway) *Cr return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultSubnetRequest type CreateDefaultSubnetInput struct { _ struct{} `type:"structure"` @@ -26746,7 +27449,6 @@ func (s *CreateDefaultSubnetInput) SetDryRun(v bool) *CreateDefaultSubnetInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultSubnetResult type CreateDefaultSubnetOutput struct { _ struct{} `type:"structure"` @@ -26771,7 +27473,6 @@ func (s *CreateDefaultSubnetOutput) SetSubnet(v *Subnet) *CreateDefaultSubnetOut } // Contains the parameters for CreateDefaultVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpcRequest type CreateDefaultVpcInput struct { _ struct{} `type:"structure"` @@ -26799,7 +27500,6 @@ func (s *CreateDefaultVpcInput) SetDryRun(v bool) *CreateDefaultVpcInput { } // Contains the output of CreateDefaultVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpcResult type CreateDefaultVpcOutput struct { _ struct{} `type:"structure"` @@ -26824,7 +27524,6 @@ func (s *CreateDefaultVpcOutput) SetVpc(v *Vpc) *CreateDefaultVpcOutput { } // Contains the parameters for CreateDhcpOptions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDhcpOptionsRequest type CreateDhcpOptionsInput struct { _ struct{} `type:"structure"` @@ -26876,7 +27575,6 @@ func (s *CreateDhcpOptionsInput) SetDryRun(v bool) *CreateDhcpOptionsInput { } // Contains the output of CreateDhcpOptions. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDhcpOptionsResult type CreateDhcpOptionsOutput struct { _ struct{} `type:"structure"` @@ -26900,12 +27598,11 @@ func (s *CreateDhcpOptionsOutput) SetDhcpOptions(v *DhcpOptions) *CreateDhcpOpti return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateEgressOnlyInternetGatewayRequest type CreateEgressOnlyInternetGatewayInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). ClientToken *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -26914,7 +27611,7 @@ type CreateEgressOnlyInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the VPC for which to create the egress-only Internet gateway. + // The ID of the VPC for which to create the egress-only internet gateway. // // VpcId is a required field VpcId *string `type:"string" required:"true"` @@ -26961,15 +27658,14 @@ func (s *CreateEgressOnlyInternetGatewayInput) SetVpcId(v string) *CreateEgressO return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateEgressOnlyInternetGatewayResult type CreateEgressOnlyInternetGatewayOutput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. ClientToken *string `locationName:"clientToken" type:"string"` - // Information about the egress-only Internet gateway. + // Information about the egress-only internet gateway. EgressOnlyInternetGateway *EgressOnlyInternetGateway `locationName:"egressOnlyInternetGateway" type:"structure"` } @@ -26995,25 +27691,258 @@ func (s *CreateEgressOnlyInternetGatewayOutput) SetEgressOnlyInternetGateway(v * return s } +type CreateFleetInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Indicates whether running instances should be terminated if the total target + // capacity of the EC2 Fleet is decreased below the current size of the EC2 + // Fleet. + ExcessCapacityTerminationPolicy *string `type:"string" enum:"FleetExcessCapacityTerminationPolicy"` + + // The configuration for the EC2 Fleet. + // + // LaunchTemplateConfigs is a required field + LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationNameList:"item" type:"list" required:"true"` + + // The allocation strategy of On-Demand Instances in an EC2 Fleet. 
+ OnDemandOptions *OnDemandOptionsRequest `type:"structure"` + + // Indicates whether EC2 Fleet should replace unhealthy instances. + ReplaceUnhealthyInstances *bool `type:"boolean"` + + // Describes the configuration of Spot Instances in an EC2 Fleet. + SpotOptions *SpotOptionsRequest `type:"structure"` + + // The key-value pair for tagging the EC2 Fleet request on creation. The value + // for ResourceType must be fleet, otherwise the fleet request fails. To tag + // instances at launch, specify the tags in the launch template (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template). + // For information about tagging after launch, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The TotalTargetCapacity, OnDemandTargetCapacity, SpotTargetCapacity, and + // DefaultCapacityType structure. + // + // TargetCapacitySpecification is a required field + TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure" required:"true"` + + // Indicates whether running instances should be terminated when the EC2 Fleet + // expires. + TerminateInstancesWithExpiration *bool `type:"boolean"` + + // The type of request. Indicates whether the EC2 Fleet only requests the target + // capacity, or also attempts to maintain it. If you request a certain target + // capacity, EC2 Fleet only places the required requests. It does not attempt + // to replenish instances if capacity is diminished, and does not submit requests + // in alternative capacity pools if capacity is unavailable. To maintain a certain + // target capacity, EC2 Fleet places the required requests to meet this target + // capacity. It also automatically replenishes any interrupted Spot Instances. + // Default: maintain. + Type *string `type:"string" enum:"FleetType"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `type:"timestamp"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new EC2 Fleet requests are placed or able to fulfill the + // request. The default end date is 7 days from the current date. + ValidUntil *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CreateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFleetInput"} + if s.LaunchTemplateConfigs == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchTemplateConfigs")) + } + if s.TargetCapacitySpecification == nil { + invalidParams.Add(request.NewErrParamRequired("TargetCapacitySpecification")) + } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TargetCapacitySpecification != nil { + if err := s.TargetCapacitySpecification.Validate(); err != nil { + invalidParams.AddNested("TargetCapacitySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateFleetInput) SetClientToken(v string) *CreateFleetInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateFleetInput) SetDryRun(v bool) *CreateFleetInput { + s.DryRun = &v + return s +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *CreateFleetInput) SetExcessCapacityTerminationPolicy(v string) *CreateFleetInput { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *CreateFleetInput) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfigRequest) *CreateFleetInput { + s.LaunchTemplateConfigs = v + return s +} + +// SetOnDemandOptions sets the OnDemandOptions field's value. +func (s *CreateFleetInput) SetOnDemandOptions(v *OnDemandOptionsRequest) *CreateFleetInput { + s.OnDemandOptions = v + return s +} + +// SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. +func (s *CreateFleetInput) SetReplaceUnhealthyInstances(v bool) *CreateFleetInput { + s.ReplaceUnhealthyInstances = &v + return s +} + +// SetSpotOptions sets the SpotOptions field's value. +func (s *CreateFleetInput) SetSpotOptions(v *SpotOptionsRequest) *CreateFleetInput { + s.SpotOptions = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFleetInput) SetTagSpecifications(v []*TagSpecification) *CreateFleetInput { + s.TagSpecifications = v + return s +} + +// SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. +func (s *CreateFleetInput) SetTargetCapacitySpecification(v *TargetCapacitySpecificationRequest) *CreateFleetInput { + s.TargetCapacitySpecification = v + return s +} + +// SetTerminateInstancesWithExpiration sets the TerminateInstancesWithExpiration field's value. +func (s *CreateFleetInput) SetTerminateInstancesWithExpiration(v bool) *CreateFleetInput { + s.TerminateInstancesWithExpiration = &v + return s +} + +// SetType sets the Type field's value. +func (s *CreateFleetInput) SetType(v string) *CreateFleetInput { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *CreateFleetInput) SetValidFrom(v time.Time) *CreateFleetInput { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. 
+func (s *CreateFleetInput) SetValidUntil(v time.Time) *CreateFleetInput { + s.ValidUntil = &v + return s +} + +type CreateFleetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` +} + +// String returns the string representation +func (s CreateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *CreateFleetOutput) SetFleetId(v string) *CreateFleetOutput { + s.FleetId = &v + return s +} + // Contains the parameters for CreateFlowLogs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFlowLogsRequest type CreateFlowLogsInput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). ClientToken *string `type:"string"` - // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs - // log group. + // The ARN for the IAM role that's used to post flow logs to a log group. + DeliverLogsPermissionArn *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Specifies the destination to which the flow log data is to be published. + // Flow log data can be published to an CloudWatch Logs log group or an Amazon + // S3 bucket. The value specified for this parameter depends on the value specified + // for LogDestinationType. // - // DeliverLogsPermissionArn is a required field - DeliverLogsPermissionArn *string `type:"string" required:"true"` + // If LogDestinationType is not specified or cloud-watch-logs, specify the Amazon + // Resource Name (ARN) of the CloudWatch Logs log group. + // + // If LogDestinationType is s3, specify the ARN of the Amazon S3 bucket. You + // can also specify a subfolder in the bucket. To specify a subfolder in the + // bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, + // to specify a subfolder named my-logs in a bucket named my-bucket, use the + // following ARN: arn:aws:s3:::my-bucket/my-logs/. + LogDestination *string `type:"string"` - // The name of the CloudWatch log group. + // Specifies the type of destination to which the flow log data is to be published. + // Flow log data can be published to CloudWatch Logs or Amazon S3. To publish + // flow log data to CloudWatch Logs, specify cloud-watch-logs. To publish flow + // log data to Amazon S3, specify s3. // - // LogGroupName is a required field - LogGroupName *string `type:"string" required:"true"` + // Default: cloud-watch-logs + LogDestinationType *string `type:"string" enum:"LogDestinationType"` + + // The name of the log group. + LogGroupName *string `type:"string"` // One or more subnet, network interface, or VPC IDs. 
// @@ -27046,12 +27975,6 @@ func (s CreateFlowLogsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateFlowLogsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateFlowLogsInput"} - if s.DeliverLogsPermissionArn == nil { - invalidParams.Add(request.NewErrParamRequired("DeliverLogsPermissionArn")) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } if s.ResourceIds == nil { invalidParams.Add(request.NewErrParamRequired("ResourceIds")) } @@ -27080,6 +28003,24 @@ func (s *CreateFlowLogsInput) SetDeliverLogsPermissionArn(v string) *CreateFlowL return s } +// SetDryRun sets the DryRun field's value. +func (s *CreateFlowLogsInput) SetDryRun(v bool) *CreateFlowLogsInput { + s.DryRun = &v + return s +} + +// SetLogDestination sets the LogDestination field's value. +func (s *CreateFlowLogsInput) SetLogDestination(v string) *CreateFlowLogsInput { + s.LogDestination = &v + return s +} + +// SetLogDestinationType sets the LogDestinationType field's value. +func (s *CreateFlowLogsInput) SetLogDestinationType(v string) *CreateFlowLogsInput { + s.LogDestinationType = &v + return s +} + // SetLogGroupName sets the LogGroupName field's value. func (s *CreateFlowLogsInput) SetLogGroupName(v string) *CreateFlowLogsInput { s.LogGroupName = &v @@ -27105,12 +28046,11 @@ func (s *CreateFlowLogsInput) SetTrafficType(v string) *CreateFlowLogsInput { } // Contains the output of CreateFlowLogs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFlowLogsResult type CreateFlowLogsOutput struct { _ struct{} `type:"structure"` - // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. ClientToken *string `locationName:"clientToken" type:"string"` // The IDs of the flow logs. @@ -27148,7 +28088,6 @@ func (s *CreateFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *CreateFlo return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImageRequest type CreateFpgaImageInput struct { _ struct{} `type:"structure"` @@ -27237,7 +28176,6 @@ func (s *CreateFpgaImageInput) SetName(v string) *CreateFpgaImageInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImageResult type CreateFpgaImageOutput struct { _ struct{} `type:"structure"` @@ -27271,7 +28209,6 @@ func (s *CreateFpgaImageOutput) SetFpgaImageId(v string) *CreateFpgaImageOutput } // Contains the parameters for CreateImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateImageRequest type CreateImageInput struct { _ struct{} `type:"structure"` @@ -27371,7 +28308,6 @@ func (s *CreateImageInput) SetNoReboot(v bool) *CreateImageInput { } // Contains the output of CreateImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateImageResult type CreateImageOutput struct { _ struct{} `type:"structure"` @@ -27396,7 +28332,6 @@ func (s *CreateImageOutput) SetImageId(v string) *CreateImageOutput { } // Contains the parameters for CreateInstanceExportTask. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceExportTaskRequest type CreateInstanceExportTaskInput struct { _ struct{} `type:"structure"` @@ -27464,7 +28399,6 @@ func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateIn } // Contains the output for CreateInstanceExportTask. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceExportTaskResult type CreateInstanceExportTaskOutput struct { _ struct{} `type:"structure"` @@ -27489,7 +28423,6 @@ func (s *CreateInstanceExportTaskOutput) SetExportTask(v *ExportTask) *CreateIns } // Contains the parameters for CreateInternetGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInternetGatewayRequest type CreateInternetGatewayInput struct { _ struct{} `type:"structure"` @@ -27517,11 +28450,10 @@ func (s *CreateInternetGatewayInput) SetDryRun(v bool) *CreateInternetGatewayInp } // Contains the output of CreateInternetGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInternetGatewayResult type CreateInternetGatewayOutput struct { _ struct{} `type:"structure"` - // Information about the Internet gateway. + // Information about the internet gateway. InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"` } @@ -27542,7 +28474,6 @@ func (s *CreateInternetGatewayOutput) SetInternetGateway(v *InternetGateway) *Cr } // Contains the parameters for CreateKeyPair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateKeyPairRequest type CreateKeyPairInput struct { _ struct{} `type:"structure"` @@ -27596,7 +28527,6 @@ func (s *CreateKeyPairInput) SetKeyName(v string) *CreateKeyPairInput { } // Describes a key pair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/KeyPair type CreateKeyPairOutput struct { _ struct{} `type:"structure"` @@ -27638,7 +28568,6 @@ func (s *CreateKeyPairOutput) SetKeyName(v string) *CreateKeyPairOutput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplateRequest type CreateLaunchTemplateInput struct { _ struct{} `type:"structure"` @@ -27730,7 +28659,6 @@ func (s *CreateLaunchTemplateInput) SetVersionDescription(v string) *CreateLaunc return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplateResult type CreateLaunchTemplateOutput struct { _ struct{} `type:"structure"` @@ -27754,7 +28682,6 @@ func (s *CreateLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Creat return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplateVersionRequest type CreateLaunchTemplateVersionInput struct { _ struct{} `type:"structure"` @@ -27863,7 +28790,6 @@ func (s *CreateLaunchTemplateVersionInput) SetVersionDescription(v string) *Crea return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplateVersionResult type CreateLaunchTemplateVersionOutput struct { _ struct{} `type:"structure"` @@ -27888,7 +28814,6 @@ func (s *CreateLaunchTemplateVersionOutput) SetLaunchTemplateVersion(v *LaunchTe } // Contains the parameters for CreateNatGateway. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNatGatewayRequest type CreateNatGatewayInput struct { _ struct{} `type:"structure"` @@ -27899,8 +28824,8 @@ type CreateNatGatewayInput struct { // AllocationId is a required field AllocationId *string `type:"string" required:"true"` - // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). // // Constraint: Maximum 64 ASCII characters. ClientToken *string `type:"string"` @@ -27956,7 +28881,6 @@ func (s *CreateNatGatewayInput) SetSubnetId(v string) *CreateNatGatewayInput { } // Contains the output of CreateNatGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNatGatewayResult type CreateNatGatewayOutput struct { _ struct{} `type:"structure"` @@ -27991,7 +28915,6 @@ func (s *CreateNatGatewayOutput) SetNatGateway(v *NatGateway) *CreateNatGatewayO } // Contains the parameters for CreateNetworkAclEntry. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclEntryRequest type CreateNetworkAclEntryInput struct { _ struct{} `type:"structure"` @@ -28026,12 +28949,12 @@ type CreateNetworkAclEntryInput struct { PortRange *PortRange `locationName:"portRange" type:"structure"` // The protocol. A value of -1 or all means all protocols. If you specify all, - // -1, or a protocol number other than tcp, udp, or icmp, traffic on all ports - // is allowed, regardless of any ports or ICMP types or codes you specify. If - // you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic - // for all ICMP types and codes allowed, regardless of any that you specify. - // If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must - // specify an ICMP type and code. + // -1, or a protocol number other than 6 (tcp), 17 (udp), or 1 (icmp), traffic + // on all ports is allowed, regardless of any ports or ICMP types or codes that + // you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR + // block, traffic for all ICMP types and codes allowed, regardless of any that + // you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR + // block, you must specify an ICMP type and code. // // Protocol is a required field Protocol *string `locationName:"protocol" type:"string" required:"true"` @@ -28146,7 +29069,6 @@ func (s *CreateNetworkAclEntryInput) SetRuleNumber(v int64) *CreateNetworkAclEnt return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclEntryOutput type CreateNetworkAclEntryOutput struct { _ struct{} `type:"structure"` } @@ -28162,7 +29084,6 @@ func (s CreateNetworkAclEntryOutput) GoString() string { } // Contains the parameters for CreateNetworkAcl. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclRequest type CreateNetworkAclInput struct { _ struct{} `type:"structure"` @@ -28214,7 +29135,6 @@ func (s *CreateNetworkAclInput) SetVpcId(v string) *CreateNetworkAclInput { } // Contains the output of CreateNetworkAcl. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclResult type CreateNetworkAclOutput struct { _ struct{} `type:"structure"` @@ -28239,7 +29159,6 @@ func (s *CreateNetworkAclOutput) SetNetworkAcl(v *NetworkAcl) *CreateNetworkAclO } // Contains the parameters for CreateNetworkInterface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfaceRequest type CreateNetworkInterfaceInput struct { _ struct{} `type:"structure"` @@ -28309,16 +29228,6 @@ func (s *CreateNetworkInterfaceInput) Validate() error { if s.SubnetId == nil { invalidParams.Add(request.NewErrParamRequired("SubnetId")) } - if s.PrivateIpAddresses != nil { - for i, v := range s.PrivateIpAddresses { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams)) - } - } - } if invalidParams.Len() > 0 { return invalidParams @@ -28381,7 +29290,6 @@ func (s *CreateNetworkInterfaceInput) SetSubnetId(v string) *CreateNetworkInterf } // Contains the output of CreateNetworkInterface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfaceResult type CreateNetworkInterfaceOutput struct { _ struct{} `type:"structure"` @@ -28406,7 +29314,6 @@ func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) } // Contains the parameters for CreateNetworkInterfacePermission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionRequest type CreateNetworkInterfacePermissionInput struct { _ struct{} `type:"structure"` @@ -28490,7 +29397,6 @@ func (s *CreateNetworkInterfacePermissionInput) SetPermission(v string) *CreateN } // Contains the output of CreateNetworkInterfacePermission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionResult type CreateNetworkInterfacePermissionOutput struct { _ struct{} `type:"structure"` @@ -28515,7 +29421,6 @@ func (s *CreateNetworkInterfacePermissionOutput) SetInterfacePermission(v *Netwo } // Contains the parameters for CreatePlacementGroup. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroupRequest type CreatePlacementGroupInput struct { _ struct{} `type:"structure"` @@ -28583,7 +29488,6 @@ func (s *CreatePlacementGroupInput) SetStrategy(v string) *CreatePlacementGroupI return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroupOutput type CreatePlacementGroupOutput struct { _ struct{} `type:"structure"` } @@ -28599,7 +29503,6 @@ func (s CreatePlacementGroupOutput) GoString() string { } // Contains the parameters for CreateReservedInstancesListing. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReservedInstancesListingRequest type CreateReservedInstancesListingInput struct { _ struct{} `type:"structure"` @@ -28687,7 +29590,6 @@ func (s *CreateReservedInstancesListingInput) SetReservedInstancesId(v string) * } // Contains the output of CreateReservedInstancesListing. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReservedInstancesListingResult type CreateReservedInstancesListingOutput struct { _ struct{} `type:"structure"` @@ -28712,7 +29614,6 @@ func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v [] } // Contains the parameters for CreateRoute. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteRequest type CreateRouteInput struct { _ struct{} `type:"structure"` @@ -28730,10 +29631,10 @@ type CreateRouteInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // [IPv6 traffic only] The ID of an egress-only Internet gateway. + // [IPv6 traffic only] The ID of an egress-only internet gateway. EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` - // The ID of an Internet gateway or virtual private gateway attached to your + // The ID of an internet gateway or virtual private gateway attached to your // VPC. GatewayId *string `locationName:"gatewayId" type:"string"` @@ -28840,7 +29741,6 @@ func (s *CreateRouteInput) SetVpcPeeringConnectionId(v string) *CreateRouteInput } // Contains the output of CreateRoute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteResult type CreateRouteOutput struct { _ struct{} `type:"structure"` @@ -28865,7 +29765,6 @@ func (s *CreateRouteOutput) SetReturn(v bool) *CreateRouteOutput { } // Contains the parameters for CreateRouteTable. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteTableRequest type CreateRouteTableInput struct { _ struct{} `type:"structure"` @@ -28917,7 +29816,6 @@ func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput { } // Contains the output of CreateRouteTable. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteTableResult type CreateRouteTableOutput struct { _ struct{} `type:"structure"` @@ -28942,7 +29840,6 @@ func (s *CreateRouteTableOutput) SetRouteTable(v *RouteTable) *CreateRouteTableO } // Contains the parameters for CreateSecurityGroup. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSecurityGroupRequest type CreateSecurityGroupInput struct { _ struct{} `type:"structure"` @@ -28965,7 +29862,7 @@ type CreateSecurityGroupInput struct { // The name of the security group. // - // Constraints: Up to 255 characters in length + // Constraints: Up to 255 characters in length. Cannot start with sg-. // // Constraints for EC2-Classic: ASCII characters // @@ -29029,7 +29926,6 @@ func (s *CreateSecurityGroupInput) SetVpcId(v string) *CreateSecurityGroupInput } // Contains the output of CreateSecurityGroup. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSecurityGroupResult type CreateSecurityGroupOutput struct { _ struct{} `type:"structure"` @@ -29054,7 +29950,6 @@ func (s *CreateSecurityGroupOutput) SetGroupId(v string) *CreateSecurityGroupOut } // Contains the parameters for CreateSnapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshotRequest type CreateSnapshotInput struct { _ struct{} `type:"structure"` @@ -29067,6 +29962,9 @@ type CreateSnapshotInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to apply to the snapshot during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the EBS volume. // // VolumeId is a required field @@ -29108,6 +30006,12 @@ func (s *CreateSnapshotInput) SetDryRun(v bool) *CreateSnapshotInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateSnapshotInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotInput { + s.TagSpecifications = v + return s +} + // SetVolumeId sets the VolumeId field's value. func (s *CreateSnapshotInput) SetVolumeId(v string) *CreateSnapshotInput { s.VolumeId = &v @@ -29115,7 +30019,6 @@ func (s *CreateSnapshotInput) SetVolumeId(v string) *CreateSnapshotInput { } // Contains the parameters for CreateSpotDatafeedSubscription. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSpotDatafeedSubscriptionRequest type CreateSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` @@ -29176,7 +30079,6 @@ func (s *CreateSpotDatafeedSubscriptionInput) SetPrefix(v string) *CreateSpotDat } // Contains the output of CreateSpotDatafeedSubscription. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSpotDatafeedSubscriptionResult type CreateSpotDatafeedSubscriptionOutput struct { _ struct{} `type:"structure"` @@ -29201,7 +30103,6 @@ func (s *CreateSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *Sp } // Contains the parameters for CreateSubnet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnetRequest type CreateSubnetInput struct { _ struct{} `type:"structure"` @@ -29289,7 +30190,6 @@ func (s *CreateSubnetInput) SetVpcId(v string) *CreateSubnetInput { } // Contains the output of CreateSubnet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnetResult type CreateSubnetOutput struct { _ struct{} `type:"structure"` @@ -29314,7 +30214,6 @@ func (s *CreateSubnetOutput) SetSubnet(v *Subnet) *CreateSubnetOutput { } // Contains the parameters for CreateTags. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTagsRequest type CreateTagsInput struct { _ struct{} `type:"structure"` @@ -29381,7 +30280,6 @@ func (s *CreateTagsInput) SetTags(v []*Tag) *CreateTagsInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTagsOutput type CreateTagsOutput struct { _ struct{} `type:"structure"` } @@ -29397,7 +30295,6 @@ func (s CreateTagsOutput) GoString() string { } // Contains the parameters for CreateVolume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVolumeRequest type CreateVolumeInput struct { _ struct{} `type:"structure"` @@ -29422,20 +30319,39 @@ type CreateVolumeInput struct { // in the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // Only valid for Provisioned IOPS SSD volumes. The number of I/O operations - // per second (IOPS) to provision for the volume, with a maximum ratio of 50 - // IOPS/GiB. + // The number of I/O operations per second (IOPS) to provision for the volume, + // with a maximum ratio of 50 IOPS/GiB. Range is 100 to 32000 IOPS for volumes + // in most regions. For exceptions, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. // - // Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes + // This parameter is valid only for Provisioned IOPS SSD (io1) volumes. Iops *int64 `type:"integer"` - // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // An identifier for the AWS Key Management Service (AWS KMS) customer master // key (CMK) to use when creating the encrypted volume. 
This parameter is only // required if you want to use a non-default CMK; if this parameter is not specified, - // the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, - // followed by the region of the CMK, the AWS account ID of the CMK owner, the - // key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. - // If a KmsKeyId is specified, the Encrypted flag must also be set. + // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted + // flag must also be set. + // + // The CMK identifier may be provided in any of the following formats: + // + // * Key ID + // + // * Key alias + // + // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed + // by the region of the CMK, the AWS account ID of the CMK owner, the key + // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // + // + // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, + // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // + // AWS parses KmsKeyId asynchronously, meaning that the action you call may + // appear to complete even though you provided an invalid identifier. The action + // will eventually fail. KmsKeyId *string `type:"string"` // The size of the volume, in GiBs. @@ -29458,7 +30374,10 @@ type CreateVolumeInput struct { // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard // for Magnetic volumes. // - // Default: standard + // Defaults: If no volume type is specified, the default is standard in us-east-1, + // eu-west-1, eu-central-1, us-west-2, us-west-1, sa-east-1, ap-northeast-1, + // ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, us-gov-west-1, + // and cn-north-1. In all other regions, EBS defaults to gp2. VolumeType *string `type:"string" enum:"VolumeType"` } @@ -29541,7 +30460,6 @@ func (s *CreateVolumeInput) SetVolumeType(v string) *CreateVolumeInput { // Describes the user or group to be added or removed from the permissions for // a volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVolumePermission type CreateVolumePermission struct { _ struct{} `type:"structure"` @@ -29577,7 +30495,6 @@ func (s *CreateVolumePermission) SetUserId(v string) *CreateVolumePermission { } // Describes modifications to the permissions for a volume. 
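The CreateSnapshot hunk above adds a TagSpecifications field, so snapshots can now carry tags at creation time. Below is a minimal, illustrative sketch against the vendored aws-sdk-go v1 EC2 client; the region, volume ID, and tag values are placeholders, and the ec2.ResourceTypeSnapshot constant is assumed to be present in this SDK version.

// Illustrative sketch only: tag an EBS snapshot at creation time using the new
// TagSpecifications field on CreateSnapshotInput. Region, volume ID, and tag
// values are placeholders; ec2.ResourceTypeSnapshot is assumed to exist here.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// TagSpecifications lets the snapshot carry tags from the moment it exists,
	// instead of requiring a follow-up CreateTags call.
	snap, err := svc.CreateSnapshot(&ec2.CreateSnapshotInput{
		VolumeId:    aws.String("vol-0123456789abcdef0"), // placeholder volume
		Description: aws.String("nightly backup"),
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String(ec2.ResourceTypeSnapshot),
			Tags: []*ec2.Tag{{
				Key:   aws.String("Owner"),
				Value: aws.String("TeamA"),
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", aws.StringValue(snap.SnapshotId))
}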
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVolumePermissionModifications type CreateVolumePermissionModifications struct { _ struct{} `type:"structure"` @@ -29612,7 +30529,6 @@ func (s *CreateVolumePermissionModifications) SetRemove(v []*CreateVolumePermiss return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointConnectionNotificationRequest type CreateVpcEndpointConnectionNotificationInput struct { _ struct{} `type:"structure"` @@ -29706,7 +30622,6 @@ func (s *CreateVpcEndpointConnectionNotificationInput) SetVpcEndpointId(v string return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointConnectionNotificationResult type CreateVpcEndpointConnectionNotificationOutput struct { _ struct{} `type:"structure"` @@ -29741,7 +30656,6 @@ func (s *CreateVpcEndpointConnectionNotificationOutput) SetConnectionNotificatio } // Contains the parameters for CreateVpcEndpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointRequest type CreateVpcEndpointInput struct { _ struct{} `type:"structure"` @@ -29784,7 +30698,7 @@ type CreateVpcEndpointInput struct { SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` // The service name. To get a list of available services, use the DescribeVpcEndpointServices - // request. + // request, or get the name from the service provider. // // ServiceName is a required field ServiceName *string `type:"string" required:"true"` @@ -29891,7 +30805,6 @@ func (s *CreateVpcEndpointInput) SetVpcId(v string) *CreateVpcEndpointInput { } // Contains the output of CreateVpcEndpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointResult type CreateVpcEndpointOutput struct { _ struct{} `type:"structure"` @@ -29925,7 +30838,6 @@ func (s *CreateVpcEndpointOutput) SetVpcEndpoint(v *VpcEndpoint) *CreateVpcEndpo return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointServiceConfigurationRequest type CreateVpcEndpointServiceConfigurationInput struct { _ struct{} `type:"structure"` @@ -29997,7 +30909,6 @@ func (s *CreateVpcEndpointServiceConfigurationInput) SetNetworkLoadBalancerArns( return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointServiceConfigurationResult type CreateVpcEndpointServiceConfigurationOutput struct { _ struct{} `type:"structure"` @@ -30032,7 +30943,6 @@ func (s *CreateVpcEndpointServiceConfigurationOutput) SetServiceConfiguration(v } // Contains the parameters for CreateVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcRequest type CreateVpcInput struct { _ struct{} `type:"structure"` @@ -30113,7 +31023,6 @@ func (s *CreateVpcInput) SetInstanceTenancy(v string) *CreateVpcInput { } // Contains the output of CreateVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcResult type CreateVpcOutput struct { _ struct{} `type:"structure"` @@ -30138,7 +31047,6 @@ func (s *CreateVpcOutput) SetVpc(v *Vpc) *CreateVpcOutput { } // Contains the parameters for CreateVpcPeeringConnection. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcPeeringConnectionRequest type CreateVpcPeeringConnectionInput struct { _ struct{} `type:"structure"` @@ -30208,7 +31116,6 @@ func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringCo } // Contains the output of CreateVpcPeeringConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcPeeringConnectionResult type CreateVpcPeeringConnectionOutput struct { _ struct{} `type:"structure"` @@ -30233,7 +31140,6 @@ func (s *CreateVpcPeeringConnectionOutput) SetVpcPeeringConnection(v *VpcPeering } // Contains the parameters for CreateVpnConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionRequest type CreateVpnConnectionInput struct { _ struct{} `type:"structure"` @@ -30322,7 +31228,6 @@ func (s *CreateVpnConnectionInput) SetVpnGatewayId(v string) *CreateVpnConnectio } // Contains the output of CreateVpnConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionResult type CreateVpnConnectionOutput struct { _ struct{} `type:"structure"` @@ -30347,7 +31252,6 @@ func (s *CreateVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *CreateVp } // Contains the parameters for CreateVpnConnectionRoute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionRouteRequest type CreateVpnConnectionRouteInput struct { _ struct{} `type:"structure"` @@ -30400,7 +31304,6 @@ func (s *CreateVpnConnectionRouteInput) SetVpnConnectionId(v string) *CreateVpnC return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionRouteOutput type CreateVpnConnectionRouteOutput struct { _ struct{} `type:"structure"` } @@ -30416,7 +31319,6 @@ func (s CreateVpnConnectionRouteOutput) GoString() string { } // Contains the parameters for CreateVpnGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnGatewayRequest type CreateVpnGatewayInput struct { _ struct{} `type:"structure"` @@ -30490,7 +31392,6 @@ func (s *CreateVpnGatewayInput) SetType(v string) *CreateVpnGatewayInput { } // Contains the output of CreateVpnGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnGatewayResult type CreateVpnGatewayOutput struct { _ struct{} `type:"structure"` @@ -30515,11 +31416,11 @@ func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayO } // Describes the credit option for CPU usage of a T2 instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreditSpecification type CreditSpecification struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 instance. + // The credit option for CPU usage of a T2 instance. Valid values are standard + // and unlimited. CpuCredits *string `locationName:"cpuCredits" type:"string"` } @@ -30540,7 +31441,6 @@ func (s *CreditSpecification) SetCpuCredits(v string) *CreditSpecification { } // The credit option for CPU usage of a T2 instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreditSpecificationRequest type CreditSpecificationRequest struct { _ struct{} `type:"structure"` @@ -30581,7 +31481,6 @@ func (s *CreditSpecificationRequest) SetCpuCredits(v string) *CreditSpecificatio } // Describes a customer gateway. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CustomerGateway type CustomerGateway struct { _ struct{} `type:"structure"` @@ -30653,7 +31552,6 @@ func (s *CustomerGateway) SetType(v string) *CustomerGateway { } // Contains the parameters for DeleteCustomerGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCustomerGatewayRequest type DeleteCustomerGatewayInput struct { _ struct{} `type:"structure"` @@ -30704,7 +31602,6 @@ func (s *DeleteCustomerGatewayInput) SetDryRun(v bool) *DeleteCustomerGatewayInp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCustomerGatewayOutput type DeleteCustomerGatewayOutput struct { _ struct{} `type:"structure"` } @@ -30720,7 +31617,6 @@ func (s DeleteCustomerGatewayOutput) GoString() string { } // Contains the parameters for DeleteDhcpOptions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteDhcpOptionsRequest type DeleteDhcpOptionsInput struct { _ struct{} `type:"structure"` @@ -30771,7 +31667,6 @@ func (s *DeleteDhcpOptionsInput) SetDryRun(v bool) *DeleteDhcpOptionsInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteDhcpOptionsOutput type DeleteDhcpOptionsOutput struct { _ struct{} `type:"structure"` } @@ -30786,7 +31681,6 @@ func (s DeleteDhcpOptionsOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteEgressOnlyInternetGatewayRequest type DeleteEgressOnlyInternetGatewayInput struct { _ struct{} `type:"structure"` @@ -30796,7 +31690,7 @@ type DeleteEgressOnlyInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the egress-only Internet gateway. + // The ID of the egress-only internet gateway. // // EgressOnlyInternetGatewayId is a required field EgressOnlyInternetGatewayId *string `type:"string" required:"true"` @@ -30837,7 +31731,6 @@ func (s *DeleteEgressOnlyInternetGatewayInput) SetEgressOnlyInternetGatewayId(v return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteEgressOnlyInternetGatewayResult type DeleteEgressOnlyInternetGatewayOutput struct { _ struct{} `type:"structure"` @@ -30861,11 +31754,221 @@ func (s *DeleteEgressOnlyInternetGatewayOutput) SetReturnCode(v bool) *DeleteEgr return s } +// Describes an EC2 Fleet error. +type DeleteFleetError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" enum:"DeleteFleetErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteFleetError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *DeleteFleetError) SetCode(v string) *DeleteFleetError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DeleteFleetError) SetMessage(v string) *DeleteFleetError { + s.Message = &v + return s +} + +// Describes an EC2 Fleet that was not successfully deleted. +type DeleteFleetErrorItem struct { + _ struct{} `type:"structure"` + + // The error. + Error *DeleteFleetError `locationName:"error" type:"structure"` + + // The ID of the EC2 Fleet. 
+ FleetId *string `locationName:"fleetId" type:"string"` +} + +// String returns the string representation +func (s DeleteFleetErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetErrorItem) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *DeleteFleetErrorItem) SetError(v *DeleteFleetError) *DeleteFleetErrorItem { + s.Error = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteFleetErrorItem) SetFleetId(v string) *DeleteFleetErrorItem { + s.FleetId = &v + return s +} + +// Describes an EC2 Fleet that was successfully deleted. +type DeleteFleetSuccessItem struct { + _ struct{} `type:"structure"` + + // The current state of the EC2 Fleet. + CurrentFleetState *string `locationName:"currentFleetState" type:"string" enum:"FleetStateCode"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // The previous state of the EC2 Fleet. + PreviousFleetState *string `locationName:"previousFleetState" type:"string" enum:"FleetStateCode"` +} + +// String returns the string representation +func (s DeleteFleetSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetSuccessItem) GoString() string { + return s.String() +} + +// SetCurrentFleetState sets the CurrentFleetState field's value. +func (s *DeleteFleetSuccessItem) SetCurrentFleetState(v string) *DeleteFleetSuccessItem { + s.CurrentFleetState = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteFleetSuccessItem) SetFleetId(v string) *DeleteFleetSuccessItem { + s.FleetId = &v + return s +} + +// SetPreviousFleetState sets the PreviousFleetState field's value. +func (s *DeleteFleetSuccessItem) SetPreviousFleetState(v string) *DeleteFleetSuccessItem { + s.PreviousFleetState = &v + return s +} + +type DeleteFleetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the EC2 Fleets. + // + // FleetIds is a required field + FleetIds []*string `locationName:"FleetId" type:"list" required:"true"` + + // Indicates whether to terminate instances for an EC2 Fleet if it is deleted + // successfully. + // + // TerminateInstances is a required field + TerminateInstances *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DeleteFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFleetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFleetsInput"} + if s.FleetIds == nil { + invalidParams.Add(request.NewErrParamRequired("FleetIds")) + } + if s.TerminateInstances == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteFleetsInput) SetDryRun(v bool) *DeleteFleetsInput { + s.DryRun = &v + return s +} + +// SetFleetIds sets the FleetIds field's value. +func (s *DeleteFleetsInput) SetFleetIds(v []*string) *DeleteFleetsInput { + s.FleetIds = v + return s +} + +// SetTerminateInstances sets the TerminateInstances field's value. +func (s *DeleteFleetsInput) SetTerminateInstances(v bool) *DeleteFleetsInput { + s.TerminateInstances = &v + return s +} + +type DeleteFleetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Fleets that are successfully deleted. + SuccessfulFleetDeletions []*DeleteFleetSuccessItem `locationName:"successfulFleetDeletionSet" locationNameList:"item" type:"list"` + + // Information about the EC2 Fleets that are not successfully deleted. + UnsuccessfulFleetDeletions []*DeleteFleetErrorItem `locationName:"unsuccessfulFleetDeletionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetsOutput) GoString() string { + return s.String() +} + +// SetSuccessfulFleetDeletions sets the SuccessfulFleetDeletions field's value. +func (s *DeleteFleetsOutput) SetSuccessfulFleetDeletions(v []*DeleteFleetSuccessItem) *DeleteFleetsOutput { + s.SuccessfulFleetDeletions = v + return s +} + +// SetUnsuccessfulFleetDeletions sets the UnsuccessfulFleetDeletions field's value. +func (s *DeleteFleetsOutput) SetUnsuccessfulFleetDeletions(v []*DeleteFleetErrorItem) *DeleteFleetsOutput { + s.UnsuccessfulFleetDeletions = v + return s +} + // Contains the parameters for DeleteFlowLogs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFlowLogsRequest type DeleteFlowLogsInput struct { _ struct{} `type:"structure"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + // One or more flow log IDs. // // FlowLogIds is a required field @@ -30895,6 +31998,12 @@ func (s *DeleteFlowLogsInput) Validate() error { return nil } +// SetDryRun sets the DryRun field's value. +func (s *DeleteFlowLogsInput) SetDryRun(v bool) *DeleteFlowLogsInput { + s.DryRun = &v + return s +} + // SetFlowLogIds sets the FlowLogIds field's value. func (s *DeleteFlowLogsInput) SetFlowLogIds(v []*string) *DeleteFlowLogsInput { s.FlowLogIds = v @@ -30902,7 +32011,6 @@ func (s *DeleteFlowLogsInput) SetFlowLogIds(v []*string) *DeleteFlowLogsInput { } // Contains the output of DeleteFlowLogs. 
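The DeleteFleets call and its DeleteFleetsInput/DeleteFleetsOutput shapes above are new in this update. The following hedged sketch deletes one EC2 Fleet, terminates its instances, and walks both result lists; the fleet ID is a placeholder and svc is assumed to be an already-initialized *ec2.EC2 client.

// Illustrative sketch only: delete an EC2 Fleet, terminating its instances,
// then report per-fleet success/failure. The fleet ID is a placeholder and
// svc is assumed to be an already-configured *ec2.EC2 client.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func deleteFleet(svc *ec2.EC2, fleetID string) {
	out, err := svc.DeleteFleets(&ec2.DeleteFleetsInput{
		FleetIds:           aws.StringSlice([]string{fleetID}),
		TerminateInstances: aws.Bool(true), // required by the new input shape
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ok := range out.SuccessfulFleetDeletions {
		fmt.Printf("%s: %s -> %s\n",
			aws.StringValue(ok.FleetId),
			aws.StringValue(ok.PreviousFleetState),
			aws.StringValue(ok.CurrentFleetState))
	}
	for _, bad := range out.UnsuccessfulFleetDeletions {
		msg := ""
		if bad.Error != nil {
			msg = aws.StringValue(bad.Error.Message)
		}
		fmt.Printf("failed %s: %s\n", aws.StringValue(bad.FleetId), msg)
	}
}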
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFlowLogsResult type DeleteFlowLogsOutput struct { _ struct{} `type:"structure"` @@ -30926,7 +32034,6 @@ func (s *DeleteFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteFlo return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageRequest type DeleteFpgaImageInput struct { _ struct{} `type:"structure"` @@ -30977,7 +32084,6 @@ func (s *DeleteFpgaImageInput) SetFpgaImageId(v string) *DeleteFpgaImageInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageResult type DeleteFpgaImageOutput struct { _ struct{} `type:"structure"` @@ -31002,7 +32108,6 @@ func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput { } // Contains the parameters for DeleteInternetGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGatewayRequest type DeleteInternetGatewayInput struct { _ struct{} `type:"structure"` @@ -31012,7 +32117,7 @@ type DeleteInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The ID of the Internet gateway. + // The ID of the internet gateway. // // InternetGatewayId is a required field InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` @@ -31053,7 +32158,6 @@ func (s *DeleteInternetGatewayInput) SetInternetGatewayId(v string) *DeleteInter return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGatewayOutput type DeleteInternetGatewayOutput struct { _ struct{} `type:"structure"` } @@ -31069,7 +32173,6 @@ func (s DeleteInternetGatewayOutput) GoString() string { } // Contains the parameters for DeleteKeyPair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteKeyPairRequest type DeleteKeyPairInput struct { _ struct{} `type:"structure"` @@ -31120,7 +32223,6 @@ func (s *DeleteKeyPairInput) SetKeyName(v string) *DeleteKeyPairInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteKeyPairOutput type DeleteKeyPairOutput struct { _ struct{} `type:"structure"` } @@ -31135,7 +32237,6 @@ func (s DeleteKeyPairOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateRequest type DeleteLaunchTemplateInput struct { _ struct{} `type:"structure"` @@ -31195,7 +32296,6 @@ func (s *DeleteLaunchTemplateInput) SetLaunchTemplateName(v string) *DeleteLaunc return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateResult type DeleteLaunchTemplateOutput struct { _ struct{} `type:"structure"` @@ -31219,7 +32319,6 @@ func (s *DeleteLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Delet return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateVersionsRequest type DeleteLaunchTemplateVersionsInput struct { _ struct{} `type:"structure"` @@ -31293,7 +32392,6 @@ func (s *DeleteLaunchTemplateVersionsInput) SetVersions(v []*string) *DeleteLaun return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateVersionsResult type DeleteLaunchTemplateVersionsOutput struct { _ struct{} `type:"structure"` @@ -31327,7 +32425,6 @@ func (s *DeleteLaunchTemplateVersionsOutput) SetUnsuccessfullyDeletedLaunchTempl } // Describes a launch template version that could not be deleted. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateVersionsResponseErrorItem type DeleteLaunchTemplateVersionsResponseErrorItem struct { _ struct{} `type:"structure"` @@ -31379,7 +32476,6 @@ func (s *DeleteLaunchTemplateVersionsResponseErrorItem) SetVersionNumber(v int64 } // Describes a launch template version that was successfully deleted. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateVersionsResponseSuccessItem type DeleteLaunchTemplateVersionsResponseSuccessItem struct { _ struct{} `type:"structure"` @@ -31422,7 +32518,6 @@ func (s *DeleteLaunchTemplateVersionsResponseSuccessItem) SetVersionNumber(v int } // Contains the parameters for DeleteNatGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNatGatewayRequest type DeleteNatGatewayInput struct { _ struct{} `type:"structure"` @@ -31462,7 +32557,6 @@ func (s *DeleteNatGatewayInput) SetNatGatewayId(v string) *DeleteNatGatewayInput } // Contains the output of DeleteNatGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNatGatewayResult type DeleteNatGatewayOutput struct { _ struct{} `type:"structure"` @@ -31487,7 +32581,6 @@ func (s *DeleteNatGatewayOutput) SetNatGatewayId(v string) *DeleteNatGatewayOutp } // Contains the parameters for DeleteNetworkAclEntry. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclEntryRequest type DeleteNetworkAclEntryInput struct { _ struct{} `type:"structure"` @@ -31566,7 +32659,6 @@ func (s *DeleteNetworkAclEntryInput) SetRuleNumber(v int64) *DeleteNetworkAclEnt return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclEntryOutput type DeleteNetworkAclEntryOutput struct { _ struct{} `type:"structure"` } @@ -31582,7 +32674,6 @@ func (s DeleteNetworkAclEntryOutput) GoString() string { } // Contains the parameters for DeleteNetworkAcl. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclRequest type DeleteNetworkAclInput struct { _ struct{} `type:"structure"` @@ -31633,7 +32724,6 @@ func (s *DeleteNetworkAclInput) SetNetworkAclId(v string) *DeleteNetworkAclInput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclOutput type DeleteNetworkAclOutput struct { _ struct{} `type:"structure"` } @@ -31649,7 +32739,6 @@ func (s DeleteNetworkAclOutput) GoString() string { } // Contains the parameters for DeleteNetworkInterface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfaceRequest type DeleteNetworkInterfaceInput struct { _ struct{} `type:"structure"` @@ -31700,7 +32789,6 @@ func (s *DeleteNetworkInterfaceInput) SetNetworkInterfaceId(v string) *DeleteNet return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfaceOutput type DeleteNetworkInterfaceOutput struct { _ struct{} `type:"structure"` } @@ -31716,7 +32804,6 @@ func (s DeleteNetworkInterfaceOutput) GoString() string { } // Contains the parameters for DeleteNetworkInterfacePermission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionRequest type DeleteNetworkInterfacePermissionInput struct { _ struct{} `type:"structure"` @@ -31778,7 +32865,6 @@ func (s *DeleteNetworkInterfacePermissionInput) SetNetworkInterfacePermissionId( } // Contains the output for DeleteNetworkInterfacePermission. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionResult type DeleteNetworkInterfacePermissionOutput struct { _ struct{} `type:"structure"` @@ -31803,7 +32889,6 @@ func (s *DeleteNetworkInterfacePermissionOutput) SetReturn(v bool) *DeleteNetwor } // Contains the parameters for DeletePlacementGroup. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroupRequest type DeletePlacementGroupInput struct { _ struct{} `type:"structure"` @@ -31854,7 +32939,6 @@ func (s *DeletePlacementGroupInput) SetGroupName(v string) *DeletePlacementGroup return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroupOutput type DeletePlacementGroupOutput struct { _ struct{} `type:"structure"` } @@ -31870,7 +32954,6 @@ func (s DeletePlacementGroupOutput) GoString() string { } // Contains the parameters for DeleteRoute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteRequest type DeleteRouteInput struct { _ struct{} `type:"structure"` @@ -31941,7 +33024,6 @@ func (s *DeleteRouteInput) SetRouteTableId(v string) *DeleteRouteInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteOutput type DeleteRouteOutput struct { _ struct{} `type:"structure"` } @@ -31957,7 +33039,6 @@ func (s DeleteRouteOutput) GoString() string { } // Contains the parameters for DeleteRouteTable. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteTableRequest type DeleteRouteTableInput struct { _ struct{} `type:"structure"` @@ -32008,7 +33089,6 @@ func (s *DeleteRouteTableInput) SetRouteTableId(v string) *DeleteRouteTableInput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteTableOutput type DeleteRouteTableOutput struct { _ struct{} `type:"structure"` } @@ -32024,7 +33104,6 @@ func (s DeleteRouteTableOutput) GoString() string { } // Contains the parameters for DeleteSecurityGroup. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSecurityGroupRequest type DeleteSecurityGroupInput struct { _ struct{} `type:"structure"` @@ -32070,7 +33149,6 @@ func (s *DeleteSecurityGroupInput) SetGroupName(v string) *DeleteSecurityGroupIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSecurityGroupOutput type DeleteSecurityGroupOutput struct { _ struct{} `type:"structure"` } @@ -32086,7 +33164,6 @@ func (s DeleteSecurityGroupOutput) GoString() string { } // Contains the parameters for DeleteSnapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSnapshotRequest type DeleteSnapshotInput struct { _ struct{} `type:"structure"` @@ -32137,7 +33214,6 @@ func (s *DeleteSnapshotInput) SetSnapshotId(v string) *DeleteSnapshotInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSnapshotOutput type DeleteSnapshotOutput struct { _ struct{} `type:"structure"` } @@ -32153,7 +33229,6 @@ func (s DeleteSnapshotOutput) GoString() string { } // Contains the parameters for DeleteSpotDatafeedSubscription. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSpotDatafeedSubscriptionRequest type DeleteSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` @@ -32180,7 +33255,6 @@ func (s *DeleteSpotDatafeedSubscriptionInput) SetDryRun(v bool) *DeleteSpotDataf return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSpotDatafeedSubscriptionOutput type DeleteSpotDatafeedSubscriptionOutput struct { _ struct{} `type:"structure"` } @@ -32196,7 +33270,6 @@ func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string { } // Contains the parameters for DeleteSubnet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnetRequest type DeleteSubnetInput struct { _ struct{} `type:"structure"` @@ -32247,7 +33320,6 @@ func (s *DeleteSubnetInput) SetSubnetId(v string) *DeleteSubnetInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnetOutput type DeleteSubnetOutput struct { _ struct{} `type:"structure"` } @@ -32263,7 +33335,6 @@ func (s DeleteSubnetOutput) GoString() string { } // Contains the parameters for DeleteTags. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTagsRequest type DeleteTagsInput struct { _ struct{} `type:"structure"` @@ -32278,12 +33349,14 @@ type DeleteTagsInput struct { // Resources is a required field Resources []*string `locationName:"resourceId" type:"list" required:"true"` - // One or more tags to delete. If you omit this parameter, we delete all tags - // for the specified resources. Specify a tag key and an optional tag value - // to delete specific tags. If you specify a tag key without a tag value, we - // delete any tag with this key regardless of its value. If you specify a tag - // key with an empty string as the tag value, we delete the tag only if its - // value is an empty string. + // One or more tags to delete. Specify a tag key and an optional tag value to + // delete specific tags. If you specify a tag key without a tag value, we delete + // any tag with this key regardless of its value. If you specify a tag key with + // an empty string as the tag value, we delete the tag only if its value is + // an empty string. + // + // If you omit this parameter, we delete all user-defined tags for the specified + // resources. We do not delete AWS-generated tags (tags that have the aws: prefix). Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` } @@ -32328,7 +33401,6 @@ func (s *DeleteTagsInput) SetTags(v []*Tag) *DeleteTagsInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTagsOutput type DeleteTagsOutput struct { _ struct{} `type:"structure"` } @@ -32344,7 +33416,6 @@ func (s DeleteTagsOutput) GoString() string { } // Contains the parameters for DeleteVolume. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVolumeRequest type DeleteVolumeInput struct { _ struct{} `type:"structure"` @@ -32395,7 +33466,6 @@ func (s *DeleteVolumeInput) SetVolumeId(v string) *DeleteVolumeInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVolumeOutput type DeleteVolumeOutput struct { _ struct{} `type:"structure"` } @@ -32410,7 +33480,6 @@ func (s DeleteVolumeOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointConnectionNotificationsRequest type DeleteVpcEndpointConnectionNotificationsInput struct { _ struct{} `type:"structure"` @@ -32461,7 +33530,6 @@ func (s *DeleteVpcEndpointConnectionNotificationsInput) SetDryRun(v bool) *Delet return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointConnectionNotificationsResult type DeleteVpcEndpointConnectionNotificationsOutput struct { _ struct{} `type:"structure"` @@ -32485,7 +33553,6 @@ func (s *DeleteVpcEndpointConnectionNotificationsOutput) SetUnsuccessful(v []*Un return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointServiceConfigurationsRequest type DeleteVpcEndpointServiceConfigurationsInput struct { _ struct{} `type:"structure"` @@ -32536,7 +33603,6 @@ func (s *DeleteVpcEndpointServiceConfigurationsInput) SetServiceIds(v []*string) return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointServiceConfigurationsResult type DeleteVpcEndpointServiceConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -32561,7 +33627,6 @@ func (s *DeleteVpcEndpointServiceConfigurationsOutput) SetUnsuccessful(v []*Unsu } // Contains the parameters for DeleteVpcEndpoints. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointsRequest type DeleteVpcEndpointsInput struct { _ struct{} `type:"structure"` @@ -32613,7 +33678,6 @@ func (s *DeleteVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DeleteVpcEndpo } // Contains the output of DeleteVpcEndpoints. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointsResult type DeleteVpcEndpointsOutput struct { _ struct{} `type:"structure"` @@ -32638,7 +33702,6 @@ func (s *DeleteVpcEndpointsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *Delet } // Contains the parameters for DeleteVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcRequest type DeleteVpcInput struct { _ struct{} `type:"structure"` @@ -32689,7 +33752,6 @@ func (s *DeleteVpcInput) SetVpcId(v string) *DeleteVpcInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcOutput type DeleteVpcOutput struct { _ struct{} `type:"structure"` } @@ -32705,7 +33767,6 @@ func (s DeleteVpcOutput) GoString() string { } // Contains the parameters for DeleteVpcPeeringConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcPeeringConnectionRequest type DeleteVpcPeeringConnectionInput struct { _ struct{} `type:"structure"` @@ -32757,7 +33818,6 @@ func (s *DeleteVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *D } // Contains the output of DeleteVpcPeeringConnection. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcPeeringConnectionResult type DeleteVpcPeeringConnectionOutput struct { _ struct{} `type:"structure"` @@ -32782,7 +33842,6 @@ func (s *DeleteVpcPeeringConnectionOutput) SetReturn(v bool) *DeleteVpcPeeringCo } // Contains the parameters for DeleteVpnConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionRequest type DeleteVpnConnectionInput struct { _ struct{} `type:"structure"` @@ -32833,7 +33892,6 @@ func (s *DeleteVpnConnectionInput) SetVpnConnectionId(v string) *DeleteVpnConnec return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionOutput type DeleteVpnConnectionOutput struct { _ struct{} `type:"structure"` } @@ -32849,7 +33907,6 @@ func (s DeleteVpnConnectionOutput) GoString() string { } // Contains the parameters for DeleteVpnConnectionRoute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionRouteRequest type DeleteVpnConnectionRouteInput struct { _ struct{} `type:"structure"` @@ -32902,7 +33959,6 @@ func (s *DeleteVpnConnectionRouteInput) SetVpnConnectionId(v string) *DeleteVpnC return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionRouteOutput type DeleteVpnConnectionRouteOutput struct { _ struct{} `type:"structure"` } @@ -32918,7 +33974,6 @@ func (s DeleteVpnConnectionRouteOutput) GoString() string { } // Contains the parameters for DeleteVpnGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnGatewayRequest type DeleteVpnGatewayInput struct { _ struct{} `type:"structure"` @@ -32969,7 +34024,6 @@ func (s *DeleteVpnGatewayInput) SetVpnGatewayId(v string) *DeleteVpnGatewayInput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnGatewayOutput type DeleteVpnGatewayOutput struct { _ struct{} `type:"structure"` } @@ -32985,7 +34039,6 @@ func (s DeleteVpnGatewayOutput) GoString() string { } // Contains the parameters for DeregisterImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterImageRequest type DeregisterImageInput struct { _ struct{} `type:"structure"` @@ -33036,7 +34089,6 @@ func (s *DeregisterImageInput) SetImageId(v string) *DeregisterImageInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterImageOutput type DeregisterImageOutput struct { _ struct{} `type:"structure"` } @@ -33052,7 +34104,6 @@ func (s DeregisterImageOutput) GoString() string { } // Contains the parameters for DescribeAccountAttributes. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAccountAttributesRequest type DescribeAccountAttributesInput struct { _ struct{} `type:"structure"` @@ -33089,7 +34140,6 @@ func (s *DescribeAccountAttributesInput) SetDryRun(v bool) *DescribeAccountAttri } // Contains the output of DescribeAccountAttributes. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAccountAttributesResult type DescribeAccountAttributesOutput struct { _ struct{} `type:"structure"` @@ -33114,7 +34164,6 @@ func (s *DescribeAccountAttributesOutput) SetAccountAttributes(v []*AccountAttri } // Contains the parameters for DescribeAddresses. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddressesRequest type DescribeAddressesInput struct { _ struct{} `type:"structure"` @@ -33150,6 +34199,16 @@ type DescribeAddressesInput struct { // the Elastic IP address. // // * public-ip - The Elastic IP address. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // [EC2-Classic] One or more Elastic IP addresses. @@ -33193,7 +34252,6 @@ func (s *DescribeAddressesInput) SetPublicIps(v []*string) *DescribeAddressesInp } // Contains the output of DescribeAddresses. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddressesResult type DescribeAddressesOutput struct { _ struct{} `type:"structure"` @@ -33217,8 +34275,67 @@ func (s *DescribeAddressesOutput) SetAddresses(v []*Address) *DescribeAddressesO return s } +type DescribeAggregateIdFormatInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeAggregateIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAggregateIdFormatInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeAggregateIdFormatInput) SetDryRun(v bool) *DescribeAggregateIdFormatInput { + s.DryRun = &v + return s +} + +type DescribeAggregateIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about each resource's ID format. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` + + // Indicates whether all resource types in the region are configured to use + // longer IDs. This value is only true if all users are configured to use longer + // IDs for all resources types in the region. + UseLongIdsAggregated *bool `locationName:"useLongIdsAggregated" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAggregateIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAggregateIdFormatOutput) GoString() string { + return s.String() +} + +// SetStatuses sets the Statuses field's value. +func (s *DescribeAggregateIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeAggregateIdFormatOutput { + s.Statuses = v + return s +} + +// SetUseLongIdsAggregated sets the UseLongIdsAggregated field's value. +func (s *DescribeAggregateIdFormatOutput) SetUseLongIdsAggregated(v bool) *DescribeAggregateIdFormatOutput { + s.UseLongIdsAggregated = &v + return s +} + // Contains the parameters for DescribeAvailabilityZones. 
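DescribeAggregateIdFormat is another call added by this update. A minimal sketch follows, assuming an initialized *ec2.EC2 client and that the pre-existing IdFormat type exposes Resource and UseLongIds fields as it does elsewhere in this SDK.

// Illustrative sketch only: check whether every resource type in the region is
// configured for longer IDs. svc is assumed to be an initialized *ec2.EC2, and
// IdFormat is assumed to expose Resource and UseLongIds.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func reportIdFormats(svc *ec2.EC2) {
	out, err := svc.DescribeAggregateIdFormat(&ec2.DescribeAggregateIdFormatInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("all resource types use long IDs:", aws.BoolValue(out.UseLongIdsAggregated))
	for _, st := range out.Statuses {
		fmt.Printf("%-22s useLongIds=%t\n", aws.StringValue(st.Resource), aws.BoolValue(st.UseLongIds))
	}
}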
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAvailabilityZonesRequest type DescribeAvailabilityZonesInput struct { _ struct{} `type:"structure"` @@ -33274,7 +34391,6 @@ func (s *DescribeAvailabilityZonesInput) SetZoneNames(v []*string) *DescribeAvai } // Contains the output of DescribeAvailabiltyZones. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAvailabilityZonesResult type DescribeAvailabilityZonesOutput struct { _ struct{} `type:"structure"` @@ -33299,7 +34415,6 @@ func (s *DescribeAvailabilityZonesOutput) SetAvailabilityZones(v []*Availability } // Contains the parameters for DescribeBundleTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasksRequest type DescribeBundleTasksInput struct { _ struct{} `type:"structure"` @@ -33369,7 +34484,6 @@ func (s *DescribeBundleTasksInput) SetFilters(v []*Filter) *DescribeBundleTasksI } // Contains the output of DescribeBundleTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasksResult type DescribeBundleTasksOutput struct { _ struct{} `type:"structure"` @@ -33394,7 +34508,6 @@ func (s *DescribeBundleTasksOutput) SetBundleTasks(v []*BundleTask) *DescribeBun } // Contains the parameters for DescribeClassicLinkInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClassicLinkInstancesRequest type DescribeClassicLinkInstancesInput struct { _ struct{} `type:"structure"` @@ -33411,20 +34524,19 @@ type DescribeClassicLinkInstancesInput struct { // // * instance-id - The ID of the instance. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * vpc-id - The ID of the VPC to which the instance is linked. // - // * vpc-id - The ID of the VPC that the instance is linked to. + // vpc-id - The ID of the VPC that the instance is linked to. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. @@ -33433,7 +34545,7 @@ type DescribeClassicLinkInstancesInput struct { // The maximum number of results to return for the request in a single page. // The remaining results of the initial request can be seen by sending another // request with the returned NextToken value. 
This value can be between 5 and - // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // 1000. If MaxResults is given a value larger than 1000, only 1000 results // are returned. You cannot specify this parameter and the instance IDs parameter // in the same request. // @@ -33485,7 +34597,6 @@ func (s *DescribeClassicLinkInstancesInput) SetNextToken(v string) *DescribeClas } // Contains the output of DescribeClassicLinkInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClassicLinkInstancesResult type DescribeClassicLinkInstancesOutput struct { _ struct{} `type:"structure"` @@ -33520,7 +34631,6 @@ func (s *DescribeClassicLinkInstancesOutput) SetNextToken(v string) *DescribeCla } // Contains the parameters for DescribeConversionTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasksRequest type DescribeConversionTasksInput struct { _ struct{} `type:"structure"` @@ -33557,7 +34667,6 @@ func (s *DescribeConversionTasksInput) SetDryRun(v bool) *DescribeConversionTask } // Contains the output for DescribeConversionTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasksResult type DescribeConversionTasksOutput struct { _ struct{} `type:"structure"` @@ -33582,7 +34691,6 @@ func (s *DescribeConversionTasksOutput) SetConversionTasks(v []*ConversionTask) } // Contains the parameters for DescribeCustomerGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGatewaysRequest type DescribeCustomerGatewaysInput struct { _ struct{} `type:"structure"` @@ -33613,21 +34721,15 @@ type DescribeCustomerGatewaysInput struct { // * type - The type of customer gateway. Currently, the only supported type // is ipsec.1. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` } @@ -33660,7 +34762,6 @@ func (s *DescribeCustomerGatewaysInput) SetFilters(v []*Filter) *DescribeCustome } // Contains the output of DescribeCustomerGateways. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGatewaysResult type DescribeCustomerGatewaysOutput struct { _ struct{} `type:"structure"` @@ -33685,7 +34786,6 @@ func (s *DescribeCustomerGatewaysOutput) SetCustomerGateways(v []*CustomerGatewa } // Contains the parameters for DescribeDhcpOptions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeDhcpOptionsRequest type DescribeDhcpOptionsInput struct { _ struct{} `type:"structure"` @@ -33708,21 +34808,15 @@ type DescribeDhcpOptionsInput struct { // // * value - The value for one of the options. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` } @@ -33755,7 +34849,6 @@ func (s *DescribeDhcpOptionsInput) SetFilters(v []*Filter) *DescribeDhcpOptionsI } // Contains the output of DescribeDhcpOptions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeDhcpOptionsResult type DescribeDhcpOptionsOutput struct { _ struct{} `type:"structure"` @@ -33779,7 +34872,6 @@ func (s *DescribeDhcpOptionsOutput) SetDhcpOptions(v []*DhcpOptions) *DescribeDh return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeEgressOnlyInternetGatewaysRequest type DescribeEgressOnlyInternetGatewaysInput struct { _ struct{} `type:"structure"` @@ -33789,12 +34881,12 @@ type DescribeEgressOnlyInternetGatewaysInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // One or more egress-only Internet gateway IDs. + // One or more egress-only internet gateway IDs. EgressOnlyInternetGatewayIds []*string `locationName:"EgressOnlyInternetGatewayId" locationNameList:"item" type:"list"` // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // NextToken value. This value can be between 5 and 1000; if MaxResults is given + // NextToken value. This value can be between 5 and 1000. If MaxResults is given // a value larger than 1000, only 1000 results are returned. 
MaxResults *int64 `type:"integer"` @@ -33836,11 +34928,10 @@ func (s *DescribeEgressOnlyInternetGatewaysInput) SetNextToken(v string) *Descri return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeEgressOnlyInternetGatewaysResult type DescribeEgressOnlyInternetGatewaysOutput struct { _ struct{} `type:"structure"` - // Information about the egress-only Internet gateways. + // Information about the egress-only internet gateways. EgressOnlyInternetGateways []*EgressOnlyInternetGateway `locationName:"egressOnlyInternetGatewaySet" locationNameList:"item" type:"list"` // The token to use to retrieve the next page of results. @@ -33869,7 +34960,6 @@ func (s *DescribeEgressOnlyInternetGatewaysOutput) SetNextToken(v string) *Descr return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpusRequest type DescribeElasticGpusInput struct { _ struct{} `type:"structure"` @@ -33944,7 +35034,6 @@ func (s *DescribeElasticGpusInput) SetNextToken(v string) *DescribeElasticGpusIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpusResult type DescribeElasticGpusOutput struct { _ struct{} `type:"structure"` @@ -33990,7 +35079,6 @@ func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusO } // Contains the parameters for DescribeExportTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasksRequest type DescribeExportTasksInput struct { _ struct{} `type:"structure"` @@ -34015,7 +35103,6 @@ func (s *DescribeExportTasksInput) SetExportTaskIds(v []*string) *DescribeExport } // Contains the output for DescribeExportTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasksResult type DescribeExportTasksOutput struct { _ struct{} `type:"structure"` @@ -34039,22 +35126,420 @@ func (s *DescribeExportTasksOutput) SetExportTasks(v []*ExportTask) *DescribeExp return s } +type DescribeFleetHistoryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The type of events to describe. By default, all events are described. + EventType *string `type:"string" enum:"FleetEventType"` + + // The ID of the EC2 Fleet. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The start date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s DescribeFleetHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeFleetHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetHistoryInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFleetHistoryInput) SetDryRun(v bool) *DescribeFleetHistoryInput { + s.DryRun = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *DescribeFleetHistoryInput) SetEventType(v string) *DescribeFleetHistoryInput { + s.EventType = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetHistoryInput) SetFleetId(v string) *DescribeFleetHistoryInput { + s.FleetId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFleetHistoryInput) SetMaxResults(v int64) *DescribeFleetHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetHistoryInput) SetNextToken(v string) *DescribeFleetHistoryInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeFleetHistoryInput) SetStartTime(v time.Time) *DescribeFleetHistoryInput { + s.StartTime = &v + return s +} + +type DescribeFleetHistoryOutput struct { + _ struct{} `type:"structure"` + + // The ID of the EC Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // Information about the events in the history of the EC2 Fleet. + HistoryRecords []*HistoryRecordEntry `locationName:"historyRecordSet" locationNameList:"item" type:"list"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The start date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s DescribeFleetHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetHistoryOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetHistoryOutput) SetFleetId(v string) *DescribeFleetHistoryOutput { + s.FleetId = &v + return s +} + +// SetHistoryRecords sets the HistoryRecords field's value. +func (s *DescribeFleetHistoryOutput) SetHistoryRecords(v []*HistoryRecordEntry) *DescribeFleetHistoryOutput { + s.HistoryRecords = v + return s +} + +// SetLastEvaluatedTime sets the LastEvaluatedTime field's value. +func (s *DescribeFleetHistoryOutput) SetLastEvaluatedTime(v time.Time) *DescribeFleetHistoryOutput { + s.LastEvaluatedTime = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetHistoryOutput) SetNextToken(v string) *DescribeFleetHistoryOutput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. 
+func (s *DescribeFleetHistoryOutput) SetStartTime(v time.Time) *DescribeFleetHistoryOutput { + s.StartTime = &v + return s +} + +type DescribeFleetInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * instance-type - The instance type. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The ID of the EC2 Fleet. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetInstancesInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFleetInstancesInput) SetDryRun(v bool) *DescribeFleetInstancesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFleetInstancesInput) SetFilters(v []*Filter) *DescribeFleetInstancesInput { + s.Filters = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetInstancesInput) SetFleetId(v string) *DescribeFleetInstancesInput { + s.FleetId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFleetInstancesInput) SetMaxResults(v int64) *DescribeFleetInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetInstancesInput) SetNextToken(v string) *DescribeFleetInstancesInput { + s.NextToken = &v + return s +} + +type DescribeFleetInstancesOutput struct { + _ struct{} `type:"structure"` + + // The running instances. This list is refreshed periodically and might be out + // of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetInstancesOutput) GoString() string { + return s.String() +} + +// SetActiveInstances sets the ActiveInstances field's value. 
+func (s *DescribeFleetInstancesOutput) SetActiveInstances(v []*ActiveInstance) *DescribeFleetInstancesOutput { + s.ActiveInstances = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetInstancesOutput) SetFleetId(v string) *DescribeFleetInstancesOutput { + s.FleetId = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetInstancesOutput) SetNextToken(v string) *DescribeFleetInstancesOutput { + s.NextToken = &v + return s +} + +type DescribeFleetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * activity-status - The progress of the EC2 Fleet ( error | pending-fulfillment + // | pending-termination | fulfilled). + // + // * excess-capacity-termination-policy - Indicates whether to terminate + // running instances if the target capacity is decreased below the current + // EC2 Fleet size (true | false). + // + // * fleet-state - The state of the EC2 Fleet (submitted | active | deleted + // | failed | deleted-running | deleted-terminating | modifying). + // + // * replace-unhealthy-instances - Indicates whether EC2 Fleet should replace + // unhealthy instances (true | false). + // + // * type - The type of request (request | maintain). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The ID of the EC2 Fleets. + FleetIds []*string `locationName:"FleetId" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFleetsInput) SetDryRun(v bool) *DescribeFleetsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFleetsInput) SetFilters(v []*Filter) *DescribeFleetsInput { + s.Filters = v + return s +} + +// SetFleetIds sets the FleetIds field's value. +func (s *DescribeFleetsInput) SetFleetIds(v []*string) *DescribeFleetsInput { + s.FleetIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFleetsInput) SetMaxResults(v int64) *DescribeFleetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetsInput) SetNextToken(v string) *DescribeFleetsInput { + s.NextToken = &v + return s +} + +type DescribeFleetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Fleets. + Fleets []*FleetData `locationName:"fleetSet" locationNameList:"item" type:"list"` + + // The token for the next set of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsOutput) GoString() string { + return s.String() +} + +// SetFleets sets the Fleets field's value. +func (s *DescribeFleetsOutput) SetFleets(v []*FleetData) *DescribeFleetsOutput { + s.Fleets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetsOutput) SetNextToken(v string) *DescribeFleetsOutput { + s.NextToken = &v + return s +} + // Contains the parameters for DescribeFlowLogs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFlowLogsRequest type DescribeFlowLogsInput struct { _ struct{} `type:"structure"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + // One or more filters. // // * deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). // + // * log-destination-type - The type of destination to which the flow log + // publishes data. Possible destination types include cloud-watch-logs and + // S3. + // // * flow-log-id - The ID of the flow log. // // * log-group-name - The name of the log group. // // * resource-id - The ID of the VPC, subnet, or network interface. // - // * traffic-type - The type of traffic (ACCEPT | REJECT | ALL) + // * traffic-type - The type of traffic (ACCEPT | REJECT | ALL). Filter []*Filter `locationNameList:"Filter" type:"list"` // One or more flow log IDs. @@ -34062,7 +35547,7 @@ type DescribeFlowLogsInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // NextToken value. This value can be between 5 and 1000; if MaxResults is given + // NextToken value. This value can be between 5 and 1000. If MaxResults is given // a value larger than 1000, only 1000 results are returned. You cannot specify // this parameter and the flow log IDs parameter in the same request. MaxResults *int64 `type:"integer"` @@ -34081,6 +35566,12 @@ func (s DescribeFlowLogsInput) GoString() string { return s.String() } +// SetDryRun sets the DryRun field's value. +func (s *DescribeFlowLogsInput) SetDryRun(v bool) *DescribeFlowLogsInput { + s.DryRun = &v + return s +} + // SetFilter sets the Filter field's value. func (s *DescribeFlowLogsInput) SetFilter(v []*Filter) *DescribeFlowLogsInput { s.Filter = v @@ -34106,7 +35597,6 @@ func (s *DescribeFlowLogsInput) SetNextToken(v string) *DescribeFlowLogsInput { } // Contains the output of DescribeFlowLogs. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFlowLogsResult type DescribeFlowLogsOutput struct { _ struct{} `type:"structure"` @@ -34140,7 +35630,6 @@ func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeRequest type DescribeFpgaImageAttributeInput struct { _ struct{} `type:"structure"` @@ -34205,7 +35694,6 @@ func (s *DescribeFpgaImageAttributeInput) SetFpgaImageId(v string) *DescribeFpga return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeResult type DescribeFpgaImageAttributeOutput struct { _ struct{} `type:"structure"` @@ -34229,7 +35717,6 @@ func (s *DescribeFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAtt return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesRequest type DescribeFpgaImagesInput struct { _ struct{} `type:"structure"` @@ -34258,21 +35745,15 @@ type DescribeFpgaImagesInput struct { // // * state - The state of the AFI (pending | failed | available | unavailable). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * update-time - The time of the most recent update. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -34353,7 +35834,6 @@ func (s *DescribeFpgaImagesInput) SetOwners(v []*string) *DescribeFpgaImagesInpu return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesResult type DescribeFpgaImagesOutput struct { _ struct{} `type:"structure"` @@ -34387,28 +35867,28 @@ func (s *DescribeFpgaImagesOutput) SetNextToken(v string) *DescribeFpgaImagesOut return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferingsRequest type DescribeHostReservationOfferingsInput struct { _ struct{} `type:"structure"` // One or more filters. // - // * instance-family - The instance family of the offering (e.g., m4). + // * instance-family - The instance family of the offering (for example, + // m4). 
// // * payment-option - The payment option (NoUpfront | PartialUpfront | AllUpfront). Filter []*Filter `locationNameList:"Filter" type:"list"` - // This is the maximum duration of the reservation you'd like to purchase, specified - // in seconds. Reservations are available in one-year and three-year terms. - // The number of seconds specified must be the number of seconds in a year (365x24x60x60) + // This is the maximum duration of the reservation to purchase, specified in + // seconds. Reservations are available in one-year and three-year terms. The + // number of seconds specified must be the number of seconds in a year (365x24x60x60) // times one of the supported durations (1 or 3). For example, specify 94608000 // for three years. MaxDuration *int64 `type:"integer"` // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. This value can be between 5 and 500; if maxResults is given - // a larger value than 500, you will receive an error. + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. MaxResults *int64 `type:"integer"` // This is the minimum duration of the reservation you'd like to purchase, specified @@ -34471,7 +35951,6 @@ func (s *DescribeHostReservationOfferingsInput) SetOfferingId(v string) *Describ return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferingsResult type DescribeHostReservationOfferingsOutput struct { _ struct{} `type:"structure"` @@ -34505,13 +35984,12 @@ func (s *DescribeHostReservationOfferingsOutput) SetOfferingSet(v []*HostOfferin return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationsRequest type DescribeHostReservationsInput struct { _ struct{} `type:"structure"` // One or more filters. // - // * instance-family - The instance family (e.g., m4). + // * instance-family - The instance family (for example, m4). // // * payment-option - The payment option (NoUpfront | PartialUpfront | AllUpfront). // @@ -34524,8 +36002,8 @@ type DescribeHostReservationsInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. This value can be between 5 and 500; if maxResults is given - // a larger value than 500, you will receive an error. + // nextToken value. This value can be between 5 and 500.If maxResults is given + // a larger value than 500, you receive an error. MaxResults *int64 `type:"integer"` // The token to use to retrieve the next page of results. @@ -34566,7 +36044,6 @@ func (s *DescribeHostReservationsInput) SetNextToken(v string) *DescribeHostRese return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationsResult type DescribeHostReservationsOutput struct { _ struct{} `type:"structure"` @@ -34601,27 +36078,30 @@ func (s *DescribeHostReservationsOutput) SetNextToken(v string) *DescribeHostRes } // Contains the parameters for DescribeHosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostsRequest type DescribeHostsInput struct { _ struct{} `type:"structure"` // One or more filters. // - // * instance-type - The instance type size that the Dedicated Host is configured - // to support. 
- // // * auto-placement - Whether auto-placement is enabled or disabled (on | // off). // + // * availability-zone - The Availability Zone of the host. + // + // * client-token - The idempotency token that you provided when you allocated + // the host. + // // * host-reservation-id - The ID of the reservation assigned to this host. // - // * client-token - The idempotency token you provided when you launched - // the instance + // * instance-type - The instance type size that the Dedicated Host is configured + // to support. // - // * state- The allocation state of the Dedicated Host (available | under-assessment + // * state - The allocation state of the Dedicated Host (available | under-assessment // | permanent-failure | released | released-permanent-failure). // - // * availability-zone - The Availability Zone of the host. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` // The IDs of the Dedicated Hosts. The IDs are used for targeted instance launches. @@ -34629,9 +36109,9 @@ type DescribeHostsInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. This value can be between 5 and 500; if maxResults is given - // a larger value than 500, you will receive an error. You cannot specify this - // parameter and the host IDs parameter in the same request. + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. You cannot specify this parameter + // and the host IDs parameter in the same request. MaxResults *int64 `locationName:"maxResults" type:"integer"` // The token to retrieve the next page of results. @@ -34673,7 +36153,6 @@ func (s *DescribeHostsInput) SetNextToken(v string) *DescribeHostsInput { } // Contains the output of DescribeHosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostsResult type DescribeHostsOutput struct { _ struct{} `type:"structure"` @@ -34707,7 +36186,6 @@ func (s *DescribeHostsOutput) SetNextToken(v string) *DescribeHostsOutput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociationsRequest type DescribeIamInstanceProfileAssociationsInput struct { _ struct{} `type:"structure"` @@ -34780,7 +36258,6 @@ func (s *DescribeIamInstanceProfileAssociationsInput) SetNextToken(v string) *De return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociationsResult type DescribeIamInstanceProfileAssociationsOutput struct { _ struct{} `type:"structure"` @@ -34815,11 +36292,16 @@ func (s *DescribeIamInstanceProfileAssociationsOutput) SetNextToken(v string) *D } // Contains the parameters for DescribeIdFormat. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdFormatRequest type DescribeIdFormatInput struct { _ struct{} `type:"structure"` - // The type of resource: instance | reservation | snapshot | volume + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | instance | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | reservation + // | route-table | route-table-association | security-group | snapshot | subnet + // | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association + // | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway Resource *string `type:"string"` } @@ -34840,7 +36322,6 @@ func (s *DescribeIdFormatInput) SetResource(v string) *DescribeIdFormatInput { } // Contains the output of DescribeIdFormat. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdFormatResult type DescribeIdFormatOutput struct { _ struct{} `type:"structure"` @@ -34865,7 +36346,6 @@ func (s *DescribeIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeIdFormatOut } // Contains the parameters for DescribeIdentityIdFormat. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdentityIdFormatRequest type DescribeIdentityIdFormatInput struct { _ struct{} `type:"structure"` @@ -34875,7 +36355,13 @@ type DescribeIdentityIdFormatInput struct { // PrincipalArn is a required field PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` - // The type of resource: instance | reservation | snapshot | volume + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | instance | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | reservation + // | route-table | route-table-association | security-group | snapshot | subnet + // | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association + // | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway Resource *string `locationName:"resource" type:"string"` } @@ -34915,7 +36401,6 @@ func (s *DescribeIdentityIdFormatInput) SetResource(v string) *DescribeIdentityI } // Contains the output of DescribeIdentityIdFormat. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdentityIdFormatResult type DescribeIdentityIdFormatOutput struct { _ struct{} `type:"structure"` @@ -34940,7 +36425,6 @@ func (s *DescribeIdentityIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeIde } // Contains the parameters for DescribeImageAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImageAttributeRequest type DescribeImageAttributeInput struct { _ struct{} `type:"structure"` @@ -35010,7 +36494,6 @@ func (s *DescribeImageAttributeInput) SetImageId(v string) *DescribeImageAttribu } // Describes an image attribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImageAttribute type DescribeImageAttributeOutput struct { _ struct{} `type:"structure"` @@ -35099,7 +36582,6 @@ func (s *DescribeImageAttributeOutput) SetSriovNetSupport(v *AttributeValue) *De } // Contains the parameters for DescribeImages. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImagesRequest type DescribeImagesInput struct { _ struct{} `type:"structure"` @@ -35179,21 +36661,15 @@ type DescribeImagesInput struct { // * sriov-net-support - A value of simple indicates that enhanced networking // with the Intel 82599 VF interface is enabled. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * virtualization-type - The virtualization type (paravirtual | hvm). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -35251,7 +36727,6 @@ func (s *DescribeImagesInput) SetOwners(v []*string) *DescribeImagesInput { } // Contains the output of DescribeImages. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImagesResult type DescribeImagesOutput struct { _ struct{} `type:"structure"` @@ -35276,7 +36751,6 @@ func (s *DescribeImagesOutput) SetImages(v []*Image) *DescribeImagesOutput { } // Contains the parameters for DescribeImportImageTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportImageTasksRequest type DescribeImportImageTasksInput struct { _ struct{} `type:"structure"` @@ -35342,7 +36816,6 @@ func (s *DescribeImportImageTasksInput) SetNextToken(v string) *DescribeImportIm } // Contains the output for DescribeImportImageTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportImageTasksResult type DescribeImportImageTasksOutput struct { _ struct{} `type:"structure"` @@ -35378,7 +36851,6 @@ func (s *DescribeImportImageTasksOutput) SetNextToken(v string) *DescribeImportI } // Contains the parameters for DescribeImportSnapshotTasks. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportSnapshotTasksRequest type DescribeImportSnapshotTasksInput struct { _ struct{} `type:"structure"` @@ -35443,7 +36915,6 @@ func (s *DescribeImportSnapshotTasksInput) SetNextToken(v string) *DescribeImpor } // Contains the output for DescribeImportSnapshotTasks. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportSnapshotTasksResult type DescribeImportSnapshotTasksOutput struct { _ struct{} `type:"structure"` @@ -35479,7 +36950,6 @@ func (s *DescribeImportSnapshotTasksOutput) SetNextToken(v string) *DescribeImpo } // Contains the parameters for DescribeInstanceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceAttributeRequest type DescribeInstanceAttributeInput struct { _ struct{} `type:"structure"` @@ -35547,7 +37017,6 @@ func (s *DescribeInstanceAttributeInput) SetInstanceId(v string) *DescribeInstan } // Describes an instance attribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceAttribute type DescribeInstanceAttributeOutput struct { _ struct{} `type:"structure"` @@ -35702,7 +37171,6 @@ func (s *DescribeInstanceAttributeOutput) SetUserData(v *AttributeValue) *Descri return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceCreditSpecificationsRequest type DescribeInstanceCreditSpecificationsInput struct { _ struct{} `type:"structure"` @@ -35774,7 +37242,6 @@ func (s *DescribeInstanceCreditSpecificationsInput) SetNextToken(v string) *Desc return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceCreditSpecificationsResult type DescribeInstanceCreditSpecificationsOutput struct { _ struct{} `type:"structure"` @@ -35809,7 +37276,6 @@ func (s *DescribeInstanceCreditSpecificationsOutput) SetNextToken(v string) *Des } // Contains the parameters for DescribeInstanceStatus. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatusRequest type DescribeInstanceStatusInput struct { _ struct{} `type:"structure"` @@ -35835,7 +37301,7 @@ type DescribeInstanceStatusInput struct { // example, 2014-09-15T17:15:20.000Z). // // * instance-state-code - The code for the instance state, as a 16-bit unsigned - // integer. The high byte is an opaque internal value and should be ignored. + // integer. The high byte is used for internal purposes and should be ignored. // The low byte is set based on the state represented. The valid values are // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), // and 80 (stopped). @@ -35926,7 +37392,6 @@ func (s *DescribeInstanceStatusInput) SetNextToken(v string) *DescribeInstanceSt } // Contains the output of DescribeInstanceStatus. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatusResult type DescribeInstanceStatusOutput struct { _ struct{} `type:"structure"` @@ -35961,7 +37426,6 @@ func (s *DescribeInstanceStatusOutput) SetNextToken(v string) *DescribeInstanceS } // Contains the parameters for DescribeInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstancesRequest type DescribeInstancesInput struct { _ struct{} `type:"structure"` @@ -36021,7 +37485,7 @@ type DescribeInstancesInput struct { // Scheduled Instance (spot | scheduled). // // * instance-state-code - The state of the instance, as a 16-bit unsigned - // integer. The high byte is an opaque internal value and should be ignored. + // integer. The high byte is used for internal purposes and should be ignored. // The low byte is set based on the state represented. The valid values are: // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), // and 80 (stopped). 
@@ -36188,20 +37652,15 @@ type DescribeInstancesInput struct { // // * subnet-id - The ID of the subnet for the instance. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of the tag's key). If you want to - // list only resources where Purpose is X, see the tag:key=value filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. // // * tenancy - The tenancy of an instance (dedicated | default | host). // @@ -36219,7 +37678,7 @@ type DescribeInstancesInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. This // value can be between 5 and 1000. You cannot specify this parameter and the - // instance IDs parameter or tag filters in the same call. + // instance IDs parameter in the same call. MaxResults *int64 `locationName:"maxResults" type:"integer"` // The token to request the next page of results. @@ -36267,7 +37726,6 @@ func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput } // Contains the output of DescribeInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstancesResult type DescribeInstancesOutput struct { _ struct{} `type:"structure"` @@ -36302,7 +37760,6 @@ func (s *DescribeInstancesOutput) SetReservations(v []*Reservation) *DescribeIns } // Contains the parameters for DescribeInternetGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInternetGatewaysRequest type DescribeInternetGatewaysInput struct { _ struct{} `type:"structure"` @@ -36321,26 +37778,20 @@ type DescribeInternetGatewaysInput struct { // // * internet-gateway-id - The ID of the Internet gateway. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. 
For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // One or more Internet gateway IDs. + // One or more internet gateway IDs. // - // Default: Describes all your Internet gateways. + // Default: Describes all your internet gateways. InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"` } @@ -36373,11 +37824,10 @@ func (s *DescribeInternetGatewaysInput) SetInternetGatewayIds(v []*string) *Desc } // Contains the output of DescribeInternetGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInternetGatewaysResult type DescribeInternetGatewaysOutput struct { _ struct{} `type:"structure"` - // Information about one or more Internet gateways. + // Information about one or more internet gateways. InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` } @@ -36398,7 +37848,6 @@ func (s *DescribeInternetGatewaysOutput) SetInternetGateways(v []*InternetGatewa } // Contains the parameters for DescribeKeyPairs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairsRequest type DescribeKeyPairsInput struct { _ struct{} `type:"structure"` @@ -36450,7 +37899,6 @@ func (s *DescribeKeyPairsInput) SetKeyNames(v []*string) *DescribeKeyPairsInput } // Contains the output of DescribeKeyPairs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairsResult type DescribeKeyPairsOutput struct { _ struct{} `type:"structure"` @@ -36474,7 +37922,6 @@ func (s *DescribeKeyPairsOutput) SetKeyPairs(v []*KeyPairInfo) *DescribeKeyPairs return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplateVersionsRequest type DescribeLaunchTemplateVersionsInput struct { _ struct{} `type:"structure"` @@ -36515,7 +37962,7 @@ type DescribeLaunchTemplateVersionsInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. This - // value can be between 5 and 1000. + // value can be between 1 and 200. MaxResults *int64 `type:"integer"` // The version number up to which to describe launch template versions. 
@@ -36608,7 +38055,6 @@ func (s *DescribeLaunchTemplateVersionsInput) SetVersions(v []*string) *Describe return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplateVersionsResult type DescribeLaunchTemplateVersionsOutput struct { _ struct{} `type:"structure"` @@ -36642,7 +38088,6 @@ func (s *DescribeLaunchTemplateVersionsOutput) SetNextToken(v string) *DescribeL return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplatesRequest type DescribeLaunchTemplatesInput struct { _ struct{} `type:"structure"` @@ -36657,6 +38102,16 @@ type DescribeLaunchTemplatesInput struct { // * create-time - The time the launch template was created. // // * launch-template-name - The name of the launch template. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more launch template IDs. @@ -36720,7 +38175,6 @@ func (s *DescribeLaunchTemplatesInput) SetNextToken(v string) *DescribeLaunchTem return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplatesResult type DescribeLaunchTemplatesOutput struct { _ struct{} `type:"structure"` @@ -36755,7 +38209,6 @@ func (s *DescribeLaunchTemplatesOutput) SetNextToken(v string) *DescribeLaunchTe } // Contains the parameters for DescribeMovingAddresses. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMovingAddressesRequest type DescribeMovingAddressesInput struct { _ struct{} `type:"structure"` @@ -36827,7 +38280,6 @@ func (s *DescribeMovingAddressesInput) SetPublicIps(v []*string) *DescribeMoving } // Contains the output of DescribeMovingAddresses. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMovingAddressesResult type DescribeMovingAddressesOutput struct { _ struct{} `type:"structure"` @@ -36862,7 +38314,6 @@ func (s *DescribeMovingAddressesOutput) SetNextToken(v string) *DescribeMovingAd } // Contains the parameters for DescribeNatGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGatewaysRequest type DescribeNatGatewaysInput struct { _ struct{} `type:"structure"` @@ -36875,21 +38326,15 @@ type DescribeNatGatewaysInput struct { // // * subnet-id - The ID of the subnet in which the NAT gateway resides. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). 
If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-id - The ID of the VPC in which the NAT gateway resides. Filter []*Filter `locationNameList:"Filter" type:"list"` @@ -36944,7 +38389,6 @@ func (s *DescribeNatGatewaysInput) SetNextToken(v string) *DescribeNatGatewaysIn } // Contains the output of DescribeNatGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGatewaysResult type DescribeNatGatewaysOutput struct { _ struct{} `type:"structure"` @@ -36979,7 +38423,6 @@ func (s *DescribeNatGatewaysOutput) SetNextToken(v string) *DescribeNatGatewaysO } // Contains the parameters for DescribeNetworkAcls. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkAclsRequest type DescribeNetworkAclsInput struct { _ struct{} `type:"structure"` @@ -37027,21 +38470,15 @@ type DescribeNetworkAclsInput struct { // // * network-acl-id - The ID of the network ACL. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-id - The ID of the VPC for the network ACL. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -37081,7 +38518,6 @@ func (s *DescribeNetworkAclsInput) SetNetworkAclIds(v []*string) *DescribeNetwor } // Contains the output of DescribeNetworkAcls. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkAclsResult type DescribeNetworkAclsOutput struct { _ struct{} `type:"structure"` @@ -37106,7 +38542,6 @@ func (s *DescribeNetworkAclsOutput) SetNetworkAcls(v []*NetworkAcl) *DescribeNet } // Contains the parameters for DescribeNetworkInterfaceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaceAttributeRequest type DescribeNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` @@ -37167,7 +38602,6 @@ func (s *DescribeNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) } // Contains the output of DescribeNetworkInterfaceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaceAttributeResult type DescribeNetworkInterfaceAttributeOutput struct { _ struct{} `type:"structure"` @@ -37228,7 +38662,6 @@ func (s *DescribeNetworkInterfaceAttributeOutput) SetSourceDestCheck(v *Attribut } // Contains the parameters for DescribeNetworkInterfacePermissions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsRequest type DescribeNetworkInterfacePermissionsInput struct { _ struct{} `type:"structure"` @@ -37295,7 +38728,6 @@ func (s *DescribeNetworkInterfacePermissionsInput) SetNextToken(v string) *Descr } // Contains the output for DescribeNetworkInterfacePermissions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsResult type DescribeNetworkInterfacePermissionsOutput struct { _ struct{} `type:"structure"` @@ -37329,7 +38761,6 @@ func (s *DescribeNetworkInterfacePermissionsOutput) SetNextToken(v string) *Desc } // Contains the parameters for DescribeNetworkInterfaces. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacesRequest type DescribeNetworkInterfacesInput struct { _ struct{} `type:"structure"` @@ -37433,21 +38864,15 @@ type DescribeNetworkInterfacesInput struct { // // * subnet-id - The ID of the subnet for the network interface. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. 
// // * vpc-id - The ID of the VPC for the network interface. Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` @@ -37487,7 +38912,6 @@ func (s *DescribeNetworkInterfacesInput) SetNetworkInterfaceIds(v []*string) *De } // Contains the output of DescribeNetworkInterfaces. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacesResult type DescribeNetworkInterfacesOutput struct { _ struct{} `type:"structure"` @@ -37512,7 +38936,6 @@ func (s *DescribeNetworkInterfacesOutput) SetNetworkInterfaces(v []*NetworkInter } // Contains the parameters for DescribePlacementGroups. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePlacementGroupsRequest type DescribePlacementGroupsInput struct { _ struct{} `type:"structure"` @@ -37567,7 +38990,6 @@ func (s *DescribePlacementGroupsInput) SetGroupNames(v []*string) *DescribePlace } // Contains the output of DescribePlacementGroups. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePlacementGroupsResult type DescribePlacementGroupsOutput struct { _ struct{} `type:"structure"` @@ -37592,7 +39014,6 @@ func (s *DescribePlacementGroupsOutput) SetPlacementGroups(v []*PlacementGroup) } // Contains the parameters for DescribePrefixLists. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrefixListsRequest type DescribePrefixListsInput struct { _ struct{} `type:"structure"` @@ -37666,7 +39087,6 @@ func (s *DescribePrefixListsInput) SetPrefixListIds(v []*string) *DescribePrefix } // Contains the output of DescribePrefixLists. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrefixListsResult type DescribePrefixListsOutput struct { _ struct{} `type:"structure"` @@ -37700,8 +39120,100 @@ func (s *DescribePrefixListsOutput) SetPrefixLists(v []*PrefixList) *DescribePre return s } +type DescribePrincipalIdFormatInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token to request the next page of results. 
+ NextToken *string `type:"string"` + + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | instance | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | reservation + // | route-table | route-table-association | security-group | snapshot | subnet + // | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association + // | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway + Resources []*string `locationName:"Resource" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrincipalIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrincipalIdFormatInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribePrincipalIdFormatInput) SetDryRun(v bool) *DescribePrincipalIdFormatInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePrincipalIdFormatInput) SetMaxResults(v int64) *DescribePrincipalIdFormatInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePrincipalIdFormatInput) SetNextToken(v string) *DescribePrincipalIdFormatInput { + s.NextToken = &v + return s +} + +// SetResources sets the Resources field's value. +func (s *DescribePrincipalIdFormatInput) SetResources(v []*string) *DescribePrincipalIdFormatInput { + s.Resources = v + return s +} + +type DescribePrincipalIdFormatOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the ID format settings for the ARN. + Principals []*PrincipalIdFormat `locationName:"principalSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrincipalIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrincipalIdFormatOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePrincipalIdFormatOutput) SetNextToken(v string) *DescribePrincipalIdFormatOutput { + s.NextToken = &v + return s +} + +// SetPrincipals sets the Principals field's value. +func (s *DescribePrincipalIdFormatOutput) SetPrincipals(v []*PrincipalIdFormat) *DescribePrincipalIdFormatOutput { + s.Principals = v + return s +} + // Contains the parameters for DescribeRegions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRegionsRequest type DescribeRegionsInput struct { _ struct{} `type:"structure"` @@ -37751,7 +39263,6 @@ func (s *DescribeRegionsInput) SetRegionNames(v []*string) *DescribeRegionsInput } // Contains the output of DescribeRegions. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRegionsResult type DescribeRegionsOutput struct { _ struct{} `type:"structure"` @@ -37776,7 +39287,6 @@ func (s *DescribeRegionsOutput) SetRegions(v []*Region) *DescribeRegionsOutput { } // Contains the parameters for DescribeReservedInstances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesRequest type DescribeReservedInstancesInput struct { _ struct{} `type:"structure"` @@ -37821,21 +39331,15 @@ type DescribeReservedInstancesInput struct { // * state - The state of the Reserved Instance (payment-pending | active // | payment-failed | retired). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * usage-price - The usage price of the Reserved Instance, per hour (for // example, 0.84). @@ -37896,7 +39400,6 @@ func (s *DescribeReservedInstancesInput) SetReservedInstancesIds(v []*string) *D } // Contains the parameters for DescribeReservedInstancesListings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesListingsRequest type DescribeReservedInstancesListingsInput struct { _ struct{} `type:"structure"` @@ -37948,7 +39451,6 @@ func (s *DescribeReservedInstancesListingsInput) SetReservedInstancesListingId(v } // Contains the output of DescribeReservedInstancesListings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesListingsResult type DescribeReservedInstancesListingsOutput struct { _ struct{} `type:"structure"` @@ -37973,7 +39475,6 @@ func (s *DescribeReservedInstancesListingsOutput) SetReservedInstancesListings(v } // Contains the parameters for DescribeReservedInstancesModifications. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesModificationsRequest type DescribeReservedInstancesModificationsInput struct { _ struct{} `type:"structure"` @@ -38049,7 +39550,6 @@ func (s *DescribeReservedInstancesModificationsInput) SetReservedInstancesModifi } // Contains the output of DescribeReservedInstancesModifications. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesModificationsResult type DescribeReservedInstancesModificationsOutput struct { _ struct{} `type:"structure"` @@ -38084,7 +39584,6 @@ func (s *DescribeReservedInstancesModificationsOutput) SetReservedInstancesModif } // Contains the parameters for DescribeReservedInstancesOfferings. 
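Similarly, a hedged sketch of querying the offerings described below, assuming the same imports and an *ec2.EC2 client svc constructed as in the previous sketch; the instance type and product description are illustrative:

// listOfferings prints one page of Reserved Instance offerings for an
// illustrative instance type and product description.
func listOfferings(svc *ec2.EC2) error {
	out, err := svc.DescribeReservedInstancesOfferings(&ec2.DescribeReservedInstancesOfferingsInput{
		InstanceType:       aws.String("m4.large"),                // illustrative
		ProductDescription: aws.String("Linux/UNIX (Amazon VPC)"), // illustrative
		MaxResults:         aws.Int64(20),
	})
	if err != nil {
		return err
	}
	for _, o := range out.ReservedInstancesOfferings {
		fmt.Println(aws.StringValue(o.ReservedInstancesOfferingId), aws.Int64Value(o.Duration))
	}
	return nil
}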
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesOfferingsRequest type DescribeReservedInstancesOfferingsInput struct { _ struct{} `type:"structure"` @@ -38294,7 +39793,6 @@ func (s *DescribeReservedInstancesOfferingsInput) SetReservedInstancesOfferingId } // Contains the output of DescribeReservedInstancesOfferings. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesOfferingsResult type DescribeReservedInstancesOfferingsOutput struct { _ struct{} `type:"structure"` @@ -38329,7 +39827,6 @@ func (s *DescribeReservedInstancesOfferingsOutput) SetReservedInstancesOfferings } // Contains the output for DescribeReservedInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesResult type DescribeReservedInstancesOutput struct { _ struct{} `type:"structure"` @@ -38354,7 +39851,6 @@ func (s *DescribeReservedInstancesOutput) SetReservedInstances(v []*ReservedInst } // Contains the parameters for DescribeRouteTables. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRouteTablesRequest type DescribeRouteTablesInput struct { _ struct{} `type:"structure"` @@ -38413,21 +39909,15 @@ type DescribeRouteTablesInput struct { // * route.vpc-peering-connection-id - The ID of a VPC peering connection // specified in a route in the table. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-id - The ID of the VPC for the route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -38467,7 +39957,6 @@ func (s *DescribeRouteTablesInput) SetRouteTableIds(v []*string) *DescribeRouteT } // Contains the output of DescribeRouteTables. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRouteTablesResult type DescribeRouteTablesOutput struct { _ struct{} `type:"structure"` @@ -38492,7 +39981,6 @@ func (s *DescribeRouteTablesOutput) SetRouteTables(v []*RouteTable) *DescribeRou } // Contains the parameters for DescribeScheduledInstanceAvailability. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstanceAvailabilityRequest type DescribeScheduledInstanceAvailabilityInput struct { _ struct{} `type:"structure"` @@ -38622,7 +40110,6 @@ func (s *DescribeScheduledInstanceAvailabilityInput) SetRecurrence(v *ScheduledI } // Contains the output of DescribeScheduledInstanceAvailability. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstanceAvailabilityResult type DescribeScheduledInstanceAvailabilityOutput struct { _ struct{} `type:"structure"` @@ -38657,7 +40144,6 @@ func (s *DescribeScheduledInstanceAvailabilityOutput) SetScheduledInstanceAvaila } // Contains the parameters for DescribeScheduledInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstancesRequest type DescribeScheduledInstancesInput struct { _ struct{} `type:"structure"` @@ -38740,7 +40226,6 @@ func (s *DescribeScheduledInstancesInput) SetSlotStartTimeRange(v *SlotStartTime } // Contains the output of DescribeScheduledInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstancesResult type DescribeScheduledInstancesOutput struct { _ struct{} `type:"structure"` @@ -38774,7 +40259,6 @@ func (s *DescribeScheduledInstancesOutput) SetScheduledInstanceSet(v []*Schedule return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupReferencesRequest type DescribeSecurityGroupReferencesInput struct { _ struct{} `type:"structure"` @@ -38825,7 +40309,6 @@ func (s *DescribeSecurityGroupReferencesInput) SetGroupId(v []*string) *Describe return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupReferencesResult type DescribeSecurityGroupReferencesOutput struct { _ struct{} `type:"structure"` @@ -38850,7 +40333,6 @@ func (s *DescribeSecurityGroupReferencesOutput) SetSecurityGroupReferenceSet(v [ } // Contains the parameters for DescribeSecurityGroups. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupsRequest type DescribeSecurityGroupsInput struct { _ struct{} `type:"structure"` @@ -38926,9 +40408,15 @@ type DescribeSecurityGroupsInput struct { // // * owner-id - The AWS account ID of the owner of the security group. // - // * tag-key - The key of a tag assigned to the security group. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the security group. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-id - The ID of the VPC specified when the security group was created. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -38949,7 +40437,8 @@ type DescribeSecurityGroupsInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another request with the returned NextToken value. - // This value can be between 5 and 1000. + // This value can be between 5 and 1000. If this parameter is not specified, + // then all results are returned. 
MaxResults *int64 `type:"integer"` // The token to request the next page of results. @@ -39003,7 +40492,6 @@ func (s *DescribeSecurityGroupsInput) SetNextToken(v string) *DescribeSecurityGr } // Contains the output of DescribeSecurityGroups. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupsResult type DescribeSecurityGroupsOutput struct { _ struct{} `type:"structure"` @@ -39038,7 +40526,6 @@ func (s *DescribeSecurityGroupsOutput) SetSecurityGroups(v []*SecurityGroup) *De } // Contains the parameters for DescribeSnapshotAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotAttributeRequest type DescribeSnapshotAttributeInput struct { _ struct{} `type:"structure"` @@ -39104,7 +40591,6 @@ func (s *DescribeSnapshotAttributeInput) SetSnapshotId(v string) *DescribeSnapsh } // Contains the output of DescribeSnapshotAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotAttributeResult type DescribeSnapshotAttributeOutput struct { _ struct{} `type:"structure"` @@ -39147,7 +40633,6 @@ func (s *DescribeSnapshotAttributeOutput) SetSnapshotId(v string) *DescribeSnaps } // Contains the parameters for DescribeSnapshots. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotsRequest type DescribeSnapshotsInput struct { _ struct{} `type:"structure"` @@ -39176,21 +40661,15 @@ type DescribeSnapshotsInput struct { // // * status - The status of the snapshot (pending | completed | error). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * volume-id - The ID of the volume the snapshot is for. // @@ -39281,7 +40760,6 @@ func (s *DescribeSnapshotsInput) SetSnapshotIds(v []*string) *DescribeSnapshotsI } // Contains the output of DescribeSnapshots. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotsResult type DescribeSnapshotsOutput struct { _ struct{} `type:"structure"` @@ -39318,7 +40796,6 @@ func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshots } // Contains the parameters for DescribeSpotDatafeedSubscription. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotDatafeedSubscriptionRequest type DescribeSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` @@ -39346,7 +40823,6 @@ func (s *DescribeSpotDatafeedSubscriptionInput) SetDryRun(v bool) *DescribeSpotD } // Contains the output of DescribeSpotDatafeedSubscription. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotDatafeedSubscriptionResult type DescribeSpotDatafeedSubscriptionOutput struct { _ struct{} `type:"structure"` @@ -39371,7 +40847,6 @@ func (s *DescribeSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v * } // Contains the parameters for DescribeSpotFleetInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstancesRequest type DescribeSpotFleetInstancesInput struct { _ struct{} `type:"structure"` @@ -39443,12 +40918,11 @@ func (s *DescribeSpotFleetInstancesInput) SetSpotFleetRequestId(v string) *Descr } // Contains the output of DescribeSpotFleetInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstancesResponse type DescribeSpotFleetInstancesOutput struct { _ struct{} `type:"structure"` - // The running instances. Note that this list is refreshed periodically and - // might be out of date. + // The running instances. This list is refreshed periodically and might be out + // of date. // // ActiveInstances is a required field ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list" required:"true"` @@ -39492,7 +40966,6 @@ func (s *DescribeSpotFleetInstancesOutput) SetSpotFleetRequestId(v string) *Desc } // Contains the parameters for DescribeSpotFleetRequestHistory. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestHistoryRequest type DescribeSpotFleetRequestHistoryInput struct { _ struct{} `type:"structure"` @@ -39521,7 +40994,7 @@ type DescribeSpotFleetRequestHistoryInput struct { // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // // StartTime is a required field - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` } // String returns the string representation @@ -39587,7 +41060,6 @@ func (s *DescribeSpotFleetRequestHistoryInput) SetStartTime(v time.Time) *Descri } // Contains the output of DescribeSpotFleetRequestHistory. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestHistoryResponse type DescribeSpotFleetRequestHistoryOutput struct { _ struct{} `type:"structure"` @@ -39602,7 +41074,7 @@ type DescribeSpotFleetRequestHistoryOutput struct { // If nextToken indicates that there are more results, this value is not present. // // LastEvaluatedTime is a required field - LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" required:"true"` // The token required to retrieve the next set of results. This value is null // when there are no more results to return. @@ -39616,7 +41088,7 @@ type DescribeSpotFleetRequestHistoryOutput struct { // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
// // StartTime is a required field - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` } // String returns the string representation @@ -39660,7 +41132,6 @@ func (s *DescribeSpotFleetRequestHistoryOutput) SetStartTime(v time.Time) *Descr } // Contains the parameters for DescribeSpotFleetRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestsRequest type DescribeSpotFleetRequestsInput struct { _ struct{} `type:"structure"` @@ -39717,7 +41188,6 @@ func (s *DescribeSpotFleetRequestsInput) SetSpotFleetRequestIds(v []*string) *De } // Contains the output of DescribeSpotFleetRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestsResponse type DescribeSpotFleetRequestsOutput struct { _ struct{} `type:"structure"` @@ -39754,7 +41224,6 @@ func (s *DescribeSpotFleetRequestsOutput) SetSpotFleetRequestConfigs(v []*SpotFl } // Contains the parameters for DescribeSpotInstanceRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequestsRequest type DescribeSpotInstanceRequestsInput struct { _ struct{} `type:"structure"` @@ -39794,7 +41263,9 @@ type DescribeSpotInstanceRequestsInput struct { // for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput // Optimized HDD, sc1for Cold HDD, or standard for Magnetic. // - // * launch.group-id - The security group for the instance. + // * launch.group-id - The ID of the security group for the instance. + // + // * launch.group-name - The name of the security group for the instance. // // * launch.image-id - The ID of the AMI. // @@ -39845,7 +41316,7 @@ type DescribeSpotInstanceRequestsInput struct { // | cancelled | failed). Spot request status information can help you track // your Amazon EC2 Spot Instance requests. For more information, see Spot // Request Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide for Linux Instances. // // * status-code - The short code describing the most recent evaluation of // your Spot Instance request. @@ -39853,21 +41324,15 @@ type DescribeSpotInstanceRequestsInput struct { // * status-message - The message explaining the status of the Spot Instance // request. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * type - The type of Spot Instance request (one-time | persistent). // @@ -39909,7 +41374,6 @@ func (s *DescribeSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*strin } // Contains the output of DescribeSpotInstanceRequests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequestsResult type DescribeSpotInstanceRequestsOutput struct { _ struct{} `type:"structure"` @@ -39934,7 +41398,6 @@ func (s *DescribeSpotInstanceRequestsOutput) SetSpotInstanceRequests(v []*SpotIn } // Contains the parameters for DescribeSpotPriceHistory. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotPriceHistoryRequest type DescribeSpotPriceHistoryInput struct { _ struct{} `type:"structure"` @@ -39949,7 +41412,7 @@ type DescribeSpotPriceHistoryInput struct { // The date and time, up to the current date, from which to stop retrieving // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + EndTime *time.Time `locationName:"endTime" type:"timestamp"` // One or more filters. // @@ -39965,9 +41428,9 @@ type DescribeSpotPriceHistoryInput struct { // * spot-price - The Spot price. The value must match exactly (or use wildcards; // greater than or less than comparison is not supported). // - // * timestamp - The timestamp of the Spot price history, in UTC format (for - // example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater - // than or less than comparison is not supported. + // * timestamp - The time stamp of the Spot price history, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). + // Greater than or less than comparison is not supported. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // Filters the results by the specified instance types. @@ -39986,7 +41449,7 @@ type DescribeSpotPriceHistoryInput struct { // The date and time, up to the past 90 days, from which to start retrieving // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + StartTime *time.Time `locationName:"startTime" type:"timestamp"` } // String returns the string representation @@ -40054,12 +41517,11 @@ func (s *DescribeSpotPriceHistoryInput) SetStartTime(v time.Time) *DescribeSpotP } // Contains the output of DescribeSpotPriceHistory. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotPriceHistoryResult type DescribeSpotPriceHistoryOutput struct { _ struct{} `type:"structure"` // The token required to retrieve the next set of results. This value is null - // when there are no more results to return. + // or an empty string when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` // The historical Spot prices. 
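Because NextToken may come back either nil or empty, a pagination loop over the Spot price history sketched here checks for both; it assumes the same imports plus time and an *ec2.EC2 client svc as in the earlier sketches, and the instance type and 24-hour window are illustrative:

// spotPriceHistory walks all pages of Spot price history for an
// illustrative instance type over the last 24 hours.
func spotPriceHistory(svc *ec2.EC2) error {
	input := &ec2.DescribeSpotPriceHistoryInput{
		InstanceTypes: []*string{aws.String("m4.large")}, // illustrative
		StartTime:     aws.Time(time.Now().Add(-24 * time.Hour)),
		EndTime:       aws.Time(time.Now()),
	}
	for {
		out, err := svc.DescribeSpotPriceHistory(input)
		if err != nil {
			return err
		}
		for _, p := range out.SpotPriceHistory {
			fmt.Println(aws.TimeValue(p.Timestamp), aws.StringValue(p.AvailabilityZone), aws.StringValue(p.SpotPrice))
		}
		// The token is null or an empty string when there are no more results.
		if aws.StringValue(out.NextToken) == "" {
			return nil
		}
		input.NextToken = out.NextToken
	}
}

The SDK also generates a DescribeSpotPriceHistoryPages helper that wraps the same loop behind a callback.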
@@ -40088,7 +41550,6 @@ func (s *DescribeSpotPriceHistoryOutput) SetSpotPriceHistory(v []*SpotPrice) *De return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStaleSecurityGroupsRequest type DescribeStaleSecurityGroupsInput struct { _ struct{} `type:"structure"` @@ -40166,7 +41627,6 @@ func (s *DescribeStaleSecurityGroupsInput) SetVpcId(v string) *DescribeStaleSecu return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStaleSecurityGroupsResult type DescribeStaleSecurityGroupsOutput struct { _ struct{} `type:"structure"` @@ -40201,7 +41661,6 @@ func (s *DescribeStaleSecurityGroupsOutput) SetStaleSecurityGroupSet(v []*StaleS } // Contains the parameters for DescribeSubnets. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnetsRequest type DescribeSubnetsInput struct { _ struct{} `type:"structure"` @@ -40239,21 +41698,15 @@ type DescribeSubnetsInput struct { // // * subnet-id - The ID of the subnet. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-id - The ID of the VPC for the subnet. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -40293,7 +41746,6 @@ func (s *DescribeSubnetsInput) SetSubnetIds(v []*string) *DescribeSubnetsInput { } // Contains the output of DescribeSubnets. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnetsResult type DescribeSubnetsOutput struct { _ struct{} `type:"structure"` @@ -40318,7 +41770,6 @@ func (s *DescribeSubnetsOutput) SetSubnets(v []*Subnet) *DescribeSubnetsOutput { } // Contains the parameters for DescribeTags. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTagsRequest type DescribeTagsInput struct { _ struct{} `type:"structure"` @@ -40335,9 +41786,10 @@ type DescribeTagsInput struct { // * resource-id - The resource ID. // // * resource-type - The resource type (customer-gateway | dhcp-options | - // image | instance | internet-gateway | network-acl | network-interface - // | reserved-instances | route-table | security-group | snapshot | spot-instances-request - // | subnet | volume | vpc | vpn-connection | vpn-gateway). 
+ // elastic-ip | fleet | fpga-image | image | instance | internet-gateway + // | launch-template | natgateway | network-acl | network-interface | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpc-peering-connection | vpn-connection | vpn-gateway). // // * value - The tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -40386,12 +41838,11 @@ func (s *DescribeTagsInput) SetNextToken(v string) *DescribeTagsInput { } // Contains the output of DescribeTags. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTagsResult type DescribeTagsOutput struct { _ struct{} `type:"structure"` // The token to use to retrieve the next page of results. This value is null - // when there are no more results to return.. + // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` // A list of tags. @@ -40421,7 +41872,6 @@ func (s *DescribeTagsOutput) SetTags(v []*TagDescription) *DescribeTagsOutput { } // Contains the parameters for DescribeVolumeAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttributeRequest type DescribeVolumeAttributeInput struct { _ struct{} `type:"structure"` @@ -40482,7 +41932,6 @@ func (s *DescribeVolumeAttributeInput) SetVolumeId(v string) *DescribeVolumeAttr } // Contains the output of DescribeVolumeAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttributeResult type DescribeVolumeAttributeOutput struct { _ struct{} `type:"structure"` @@ -40525,7 +41974,6 @@ func (s *DescribeVolumeAttributeOutput) SetVolumeId(v string) *DescribeVolumeAtt } // Contains the parameters for DescribeVolumeStatus. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeStatusRequest type DescribeVolumeStatusInput struct { _ struct{} `type:"structure"` @@ -40631,7 +42079,6 @@ func (s *DescribeVolumeStatusInput) SetVolumeIds(v []*string) *DescribeVolumeSta } // Contains the output of DescribeVolumeStatus. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeStatusResult type DescribeVolumeStatusOutput struct { _ struct{} `type:"structure"` @@ -40666,7 +42113,6 @@ func (s *DescribeVolumeStatusOutput) SetVolumeStatuses(v []*VolumeStatusItem) *D } // Contains the parameters for DescribeVolumes. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesRequest type DescribeVolumesInput struct { _ struct{} `type:"structure"` @@ -40689,8 +42135,7 @@ type DescribeVolumesInput struct { // * attachment.instance-id - The ID of the instance the volume is attached // to. // - // * attachment.status - The attachment state (attaching | attached | detaching - // | detached). + // * attachment.status - The attachment state (attaching | attached | detaching). // // * availability-zone - The Availability Zone in which the volume was created. // @@ -40705,21 +42150,15 @@ type DescribeVolumesInput struct { // * status - The status of the volume (creating | available | in-use | deleting // | deleted | error). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. 
+ // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * volume-id - The volume ID. // @@ -40789,7 +42228,6 @@ func (s *DescribeVolumesInput) SetVolumeIds(v []*string) *DescribeVolumesInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModificationsRequest type DescribeVolumesModificationsInput struct { _ struct{} `type:"structure"` @@ -40855,7 +42293,6 @@ func (s *DescribeVolumesModificationsInput) SetVolumeIds(v []*string) *DescribeV return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModificationsResult type DescribeVolumesModificationsOutput struct { _ struct{} `type:"structure"` @@ -40889,7 +42326,6 @@ func (s *DescribeVolumesModificationsOutput) SetVolumesModifications(v []*Volume } // Contains the output of DescribeVolumes. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesResult type DescribeVolumesOutput struct { _ struct{} `type:"structure"` @@ -40926,7 +42362,6 @@ func (s *DescribeVolumesOutput) SetVolumes(v []*Volume) *DescribeVolumesOutput { } // Contains the parameters for DescribeVpcAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcAttributeRequest type DescribeVpcAttributeInput struct { _ struct{} `type:"structure"` @@ -40992,7 +42427,6 @@ func (s *DescribeVpcAttributeInput) SetVpcId(v string) *DescribeVpcAttributeInpu } // Contains the output of DescribeVpcAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcAttributeResult type DescribeVpcAttributeOutput struct { _ struct{} `type:"structure"` @@ -41039,7 +42473,6 @@ func (s *DescribeVpcAttributeOutput) SetVpcId(v string) *DescribeVpcAttributeOut } // Contains the parameters for DescribeVpcClassicLinkDnsSupport. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkDnsSupportRequest type DescribeVpcClassicLinkDnsSupportInput struct { _ struct{} `type:"structure"` @@ -41101,7 +42534,6 @@ func (s *DescribeVpcClassicLinkDnsSupportInput) SetVpcIds(v []*string) *Describe } // Contains the output of DescribeVpcClassicLinkDnsSupport. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkDnsSupportResult type DescribeVpcClassicLinkDnsSupportOutput struct { _ struct{} `type:"structure"` @@ -41135,7 +42567,6 @@ func (s *DescribeVpcClassicLinkDnsSupportOutput) SetVpcs(v []*ClassicLinkDnsSupp } // Contains the parameters for DescribeVpcClassicLink. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkRequest type DescribeVpcClassicLinkInput struct { _ struct{} `type:"structure"` @@ -41150,21 +42581,15 @@ type DescribeVpcClassicLinkInput struct { // * is-classic-link-enabled - Whether the VPC is enabled for ClassicLink // (true | false). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more VPCs for which you want to describe the ClassicLink status. @@ -41200,7 +42625,6 @@ func (s *DescribeVpcClassicLinkInput) SetVpcIds(v []*string) *DescribeVpcClassic } // Contains the output of DescribeVpcClassicLink. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkResult type DescribeVpcClassicLinkOutput struct { _ struct{} `type:"structure"` @@ -41224,7 +42648,6 @@ func (s *DescribeVpcClassicLinkOutput) SetVpcs(v []*VpcClassicLink) *DescribeVpc return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionNotificationsRequest type DescribeVpcEndpointConnectionNotificationsInput struct { _ struct{} `type:"structure"` @@ -41301,7 +42724,6 @@ func (s *DescribeVpcEndpointConnectionNotificationsInput) SetNextToken(v string) return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionNotificationsResult type DescribeVpcEndpointConnectionNotificationsOutput struct { _ struct{} `type:"structure"` @@ -41335,7 +42757,6 @@ func (s *DescribeVpcEndpointConnectionNotificationsOutput) SetNextToken(v string return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionsRequest type DescribeVpcEndpointConnectionsInput struct { _ struct{} `type:"structure"` @@ -41402,7 +42823,6 @@ func (s *DescribeVpcEndpointConnectionsInput) SetNextToken(v string) *DescribeVp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionsResult type DescribeVpcEndpointConnectionsOutput struct { _ struct{} `type:"structure"` @@ -41436,7 +42856,6 @@ func (s *DescribeVpcEndpointConnectionsOutput) SetVpcEndpointConnections(v []*Vp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServiceConfigurationsRequest type DescribeVpcEndpointServiceConfigurationsInput struct { _ struct{} `type:"structure"` @@ -41510,7 +42929,6 @@ func (s *DescribeVpcEndpointServiceConfigurationsInput) SetServiceIds(v []*strin return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServiceConfigurationsResult type DescribeVpcEndpointServiceConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -41544,7 +42962,6 @@ func (s *DescribeVpcEndpointServiceConfigurationsOutput) SetServiceConfiguration return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicePermissionsRequest type DescribeVpcEndpointServicePermissionsInput struct { _ struct{} `type:"structure"` @@ -41631,7 +43048,6 @@ func (s *DescribeVpcEndpointServicePermissionsInput) SetServiceId(v string) *Des return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicePermissionsResult type DescribeVpcEndpointServicePermissionsOutput struct { _ struct{} `type:"structure"` @@ -41666,7 +43082,6 @@ func (s *DescribeVpcEndpointServicePermissionsOutput) SetNextToken(v string) *De } // Contains the parameters for DescribeVpcEndpointServices. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicesRequest type DescribeVpcEndpointServicesInput struct { _ struct{} `type:"structure"` @@ -41737,7 +43152,6 @@ func (s *DescribeVpcEndpointServicesInput) SetServiceNames(v []*string) *Describ } // Contains the output of DescribeVpcEndpointServices. 
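The output below is typically consumed by listing the service names it carries; a small sketch, again assuming an *ec2.EC2 client svc set up as in the earlier sketches:

// listEndpointServices prints the names of the VPC endpoint services
// visible to the account.
func listEndpointServices(svc *ec2.EC2) error {
	out, err := svc.DescribeVpcEndpointServices(&ec2.DescribeVpcEndpointServicesInput{})
	if err != nil {
		return err
	}
	for _, name := range out.ServiceNames {
		fmt.Println(aws.StringValue(name))
	}
	return nil
}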
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicesResult type DescribeVpcEndpointServicesOutput struct { _ struct{} `type:"structure"` @@ -41781,7 +43195,6 @@ func (s *DescribeVpcEndpointServicesOutput) SetServiceNames(v []*string) *Descri } // Contains the parameters for DescribeVpcEndpoints. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointsRequest type DescribeVpcEndpointsInput struct { _ struct{} `type:"structure"` @@ -41859,7 +43272,6 @@ func (s *DescribeVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DescribeVpcE } // Contains the output of DescribeVpcEndpoints. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointsResult type DescribeVpcEndpointsOutput struct { _ struct{} `type:"structure"` @@ -41894,7 +43306,6 @@ func (s *DescribeVpcEndpointsOutput) SetVpcEndpoints(v []*VpcEndpoint) *Describe } // Contains the parameters for DescribeVpcPeeringConnections. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnectionsRequest type DescribeVpcPeeringConnectionsInput struct { _ struct{} `type:"structure"` @@ -41929,21 +43340,15 @@ type DescribeVpcPeeringConnectionsInput struct { // * status-message - A message that provides more information about the // status of the VPC peering connection, if applicable. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-peering-connection-id - The ID of the VPC peering connection. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -41983,7 +43388,6 @@ func (s *DescribeVpcPeeringConnectionsInput) SetVpcPeeringConnectionIds(v []*str } // Contains the output of DescribeVpcPeeringConnections. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnectionsResult type DescribeVpcPeeringConnectionsOutput struct { _ struct{} `type:"structure"` @@ -42008,7 +43412,6 @@ func (s *DescribeVpcPeeringConnectionsOutput) SetVpcPeeringConnections(v []*VpcP } // Contains the parameters for DescribeVpcs. 
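For the parameters below, a hedged sketch that combines the state filter with the tag:<key> form; the Owner=TeamA tag is illustrative and the client is assumed as before:

// listTaggedVpcs prints the available VPCs carrying the illustrative tag Owner=TeamA.
func listTaggedVpcs(svc *ec2.EC2) error {
	out, err := svc.DescribeVpcs(&ec2.DescribeVpcsInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("state"), Values: []*string{aws.String("available")}},
			{Name: aws.String("tag:Owner"), Values: []*string{aws.String("TeamA")}},
		},
	})
	if err != nil {
		return err
	}
	for _, vpc := range out.Vpcs {
		fmt.Println(aws.StringValue(vpc.VpcId), aws.StringValue(vpc.CidrBlock))
	}
	return nil
}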
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcsRequest type DescribeVpcsInput struct { _ struct{} `type:"structure"` @@ -42049,21 +43452,15 @@ type DescribeVpcsInput struct { // // * state - The state of the VPC (pending | available). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * vpc-id - The ID of the VPC. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -42103,7 +43500,6 @@ func (s *DescribeVpcsInput) SetVpcIds(v []*string) *DescribeVpcsInput { } // Contains the output of DescribeVpcs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcsResult type DescribeVpcsOutput struct { _ struct{} `type:"structure"` @@ -42128,7 +43524,6 @@ func (s *DescribeVpcsOutput) SetVpcs(v []*Vpc) *DescribeVpcsOutput { } // Contains the parameters for DescribeVpnConnections. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnectionsRequest type DescribeVpnConnectionsInput struct { _ struct{} `type:"structure"` @@ -42159,21 +43554,15 @@ type DescribeVpnConnectionsInput struct { // * bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP // device. // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. - // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * type - The type of VPN connection. Currently the only supported type // is ipsec.1. @@ -42219,7 +43608,6 @@ func (s *DescribeVpnConnectionsInput) SetVpnConnectionIds(v []*string) *Describe } // Contains the output of DescribeVpnConnections. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnectionsResult type DescribeVpnConnectionsOutput struct { _ struct{} `type:"structure"` @@ -42244,7 +43632,6 @@ func (s *DescribeVpnConnectionsOutput) SetVpnConnections(v []*VpnConnection) *De } // Contains the parameters for DescribeVpnGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnGatewaysRequest type DescribeVpnGatewaysInput struct { _ struct{} `type:"structure"` @@ -42270,21 +43657,15 @@ type DescribeVpnGatewaysInput struct { // * state - The state of the virtual private gateway (pending | available // | deleting | deleted). // - // * tag:key=value - The key/value combination of a tag assigned to the resource. - // Specify the key of the tag in the filter name and the value of the tag - // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose - // for the filter name and X for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a tag assigned to the resource. This filter is - // independent of the tag-value filter. For example, if you use both the - // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources - // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. - // - // * tag-value - The value of a tag assigned to the resource. This filter - // is independent of the tag-key filter. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. // // * type - The type of virtual private gateway. Currently the only supported // type is ipsec.1. @@ -42327,7 +43708,6 @@ func (s *DescribeVpnGatewaysInput) SetVpnGatewayIds(v []*string) *DescribeVpnGat } // Contains the output of DescribeVpnGateways. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnGatewaysResult type DescribeVpnGatewaysOutput struct { _ struct{} `type:"structure"` @@ -42352,7 +43732,6 @@ func (s *DescribeVpnGatewaysOutput) SetVpnGateways(v []*VpnGateway) *DescribeVpn } // Contains the parameters for DetachClassicLinkVpc. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachClassicLinkVpcRequest type DetachClassicLinkVpcInput struct { _ struct{} `type:"structure"` @@ -42418,7 +43797,6 @@ func (s *DetachClassicLinkVpcInput) SetVpcId(v string) *DetachClassicLinkVpcInpu } // Contains the output of DetachClassicLinkVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachClassicLinkVpcResult type DetachClassicLinkVpcOutput struct { _ struct{} `type:"structure"` @@ -42443,7 +43821,6 @@ func (s *DetachClassicLinkVpcOutput) SetReturn(v bool) *DetachClassicLinkVpcOutp } // Contains the parameters for DetachInternetGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachInternetGatewayRequest type DetachInternetGatewayInput struct { _ struct{} `type:"structure"` @@ -42453,7 +43830,7 @@ type DetachInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The ID of the Internet gateway. + // The ID of the internet gateway. // // InternetGatewayId is a required field InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` @@ -42508,7 +43885,6 @@ func (s *DetachInternetGatewayInput) SetVpcId(v string) *DetachInternetGatewayIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachInternetGatewayOutput type DetachInternetGatewayOutput struct { _ struct{} `type:"structure"` } @@ -42524,7 +43900,6 @@ func (s DetachInternetGatewayOutput) GoString() string { } // Contains the parameters for DetachNetworkInterface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachNetworkInterfaceRequest type DetachNetworkInterfaceInput struct { _ struct{} `type:"structure"` @@ -42584,7 +43959,6 @@ func (s *DetachNetworkInterfaceInput) SetForce(v bool) *DetachNetworkInterfaceIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachNetworkInterfaceOutput type DetachNetworkInterfaceOutput struct { _ struct{} `type:"structure"` } @@ -42600,7 +43974,6 @@ func (s DetachNetworkInterfaceOutput) GoString() string { } // Contains the parameters for DetachVolume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVolumeRequest type DetachVolumeInput struct { _ struct{} `type:"structure"` @@ -42685,7 +44058,6 @@ func (s *DetachVolumeInput) SetVolumeId(v string) *DetachVolumeInput { } // Contains the parameters for DetachVpnGateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVpnGatewayRequest type DetachVpnGatewayInput struct { _ struct{} `type:"structure"` @@ -42750,7 +44122,6 @@ func (s *DetachVpnGatewayInput) SetVpnGatewayId(v string) *DetachVpnGatewayInput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVpnGatewayOutput type DetachVpnGatewayOutput struct { _ struct{} `type:"structure"` } @@ -42766,7 +44137,6 @@ func (s DetachVpnGatewayOutput) GoString() string { } // Describes a DHCP configuration option. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DhcpConfiguration type DhcpConfiguration struct { _ struct{} `type:"structure"` @@ -42800,7 +44170,6 @@ func (s *DhcpConfiguration) SetValues(v []*AttributeValue) *DhcpConfiguration { } // Describes a set of DHCP options. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DhcpOptions type DhcpOptions struct { _ struct{} `type:"structure"` @@ -42843,7 +44212,6 @@ func (s *DhcpOptions) SetTags(v []*Tag) *DhcpOptions { } // Contains the parameters for DisableVgwRoutePropagation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVgwRoutePropagationRequest type DisableVgwRoutePropagationInput struct { _ struct{} `type:"structure"` @@ -42896,7 +44264,6 @@ func (s *DisableVgwRoutePropagationInput) SetRouteTableId(v string) *DisableVgwR return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVgwRoutePropagationOutput type DisableVgwRoutePropagationOutput struct { _ struct{} `type:"structure"` } @@ -42912,7 +44279,6 @@ func (s DisableVgwRoutePropagationOutput) GoString() string { } // Contains the parameters for DisableVpcClassicLinkDnsSupport. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkDnsSupportRequest type DisableVpcClassicLinkDnsSupportInput struct { _ struct{} `type:"structure"` @@ -42937,7 +44303,6 @@ func (s *DisableVpcClassicLinkDnsSupportInput) SetVpcId(v string) *DisableVpcCla } // Contains the output of DisableVpcClassicLinkDnsSupport. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkDnsSupportResult type DisableVpcClassicLinkDnsSupportOutput struct { _ struct{} `type:"structure"` @@ -42962,7 +44327,6 @@ func (s *DisableVpcClassicLinkDnsSupportOutput) SetReturn(v bool) *DisableVpcCla } // Contains the parameters for DisableVpcClassicLink. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkRequest type DisableVpcClassicLinkInput struct { _ struct{} `type:"structure"` @@ -43014,7 +44378,6 @@ func (s *DisableVpcClassicLinkInput) SetVpcId(v string) *DisableVpcClassicLinkIn } // Contains the output of DisableVpcClassicLink. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkResult type DisableVpcClassicLinkOutput struct { _ struct{} `type:"structure"` @@ -43039,7 +44402,6 @@ func (s *DisableVpcClassicLinkOutput) SetReturn(v bool) *DisableVpcClassicLinkOu } // Contains the parameters for DisassociateAddress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateAddressRequest type DisassociateAddressInput struct { _ struct{} `type:"structure"` @@ -43084,7 +44446,6 @@ func (s *DisassociateAddressInput) SetPublicIp(v string) *DisassociateAddressInp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateAddressOutput type DisassociateAddressOutput struct { _ struct{} `type:"structure"` } @@ -43099,7 +44460,6 @@ func (s DisassociateAddressOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfileRequest type DisassociateIamInstanceProfileInput struct { _ struct{} `type:"structure"` @@ -43138,7 +44498,6 @@ func (s *DisassociateIamInstanceProfileInput) SetAssociationId(v string) *Disass return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfileResult type DisassociateIamInstanceProfileOutput struct { _ struct{} `type:"structure"` @@ -43163,7 +44522,6 @@ func (s *DisassociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation( } // Contains the parameters for DisassociateRouteTable. 
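The parameters below reduce to a single association ID; a minimal sketch, assuming the client from the earlier sketches and a placeholder association ID:

// disassociateRouteTable removes a subnet/route-table association by its
// association ID (placeholder value shown).
func disassociateRouteTable(svc *ec2.EC2) error {
	_, err := svc.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{
		AssociationId: aws.String("rtbassoc-0123456789abcdef0"), // placeholder
	})
	return err
}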
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateRouteTableRequest type DisassociateRouteTableInput struct { _ struct{} `type:"structure"` @@ -43215,7 +44573,6 @@ func (s *DisassociateRouteTableInput) SetDryRun(v bool) *DisassociateRouteTableI return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateRouteTableOutput type DisassociateRouteTableOutput struct { _ struct{} `type:"structure"` } @@ -43230,7 +44587,6 @@ func (s DisassociateRouteTableOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateSubnetCidrBlockRequest type DisassociateSubnetCidrBlockInput struct { _ struct{} `type:"structure"` @@ -43269,7 +44625,6 @@ func (s *DisassociateSubnetCidrBlockInput) SetAssociationId(v string) *Disassoci return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateSubnetCidrBlockResult type DisassociateSubnetCidrBlockOutput struct { _ struct{} `type:"structure"` @@ -43302,7 +44657,6 @@ func (s *DisassociateSubnetCidrBlockOutput) SetSubnetId(v string) *DisassociateS return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateVpcCidrBlockRequest type DisassociateVpcCidrBlockInput struct { _ struct{} `type:"structure"` @@ -43341,7 +44695,6 @@ func (s *DisassociateVpcCidrBlockInput) SetAssociationId(v string) *Disassociate return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateVpcCidrBlockResult type DisassociateVpcCidrBlockOutput struct { _ struct{} `type:"structure"` @@ -43384,7 +44737,6 @@ func (s *DisassociateVpcCidrBlockOutput) SetVpcId(v string) *DisassociateVpcCidr } // Describes a disk image. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DiskImage type DiskImage struct { _ struct{} `type:"structure"` @@ -43447,7 +44799,6 @@ func (s *DiskImage) SetVolume(v *VolumeDetail) *DiskImage { } // Describes a disk image. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DiskImageDescription type DiskImageDescription struct { _ struct{} `type:"structure"` @@ -43455,9 +44806,7 @@ type DiskImageDescription struct { Checksum *string `locationName:"checksum" type:"string"` // The disk image format. - // - // Format is a required field - Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + Format *string `locationName:"format" type:"string" enum:"DiskImageFormat"` // A presigned URL for the import manifest stored in Amazon S3. For information // about creating a presigned URL for an Amazon S3 object, read the "Query String @@ -43467,14 +44816,10 @@ type DiskImageDescription struct { // // For information about the import manifest referenced by this API action, // see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). - // - // ImportManifestUrl is a required field - ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string"` // The size of the disk image, in GiB. - // - // Size is a required field - Size *int64 `locationName:"size" type:"long" required:"true"` + Size *int64 `locationName:"size" type:"long"` } // String returns the string representation @@ -43512,7 +44857,6 @@ func (s *DiskImageDescription) SetSize(v int64) *DiskImageDescription { } // Describes a disk image. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DiskImageDetail type DiskImageDetail struct { _ struct{} `type:"structure"` @@ -43587,14 +44931,11 @@ func (s *DiskImageDetail) SetImportManifestUrl(v string) *DiskImageDetail { } // Describes a disk image volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DiskImageVolumeDescription type DiskImageVolumeDescription struct { _ struct{} `type:"structure"` // The volume identifier. - // - // Id is a required field - Id *string `locationName:"id" type:"string" required:"true"` + Id *string `locationName:"id" type:"string"` // The size of the volume, in GiB. Size *int64 `locationName:"size" type:"long"` @@ -43623,7 +44964,6 @@ func (s *DiskImageVolumeDescription) SetSize(v int64) *DiskImageVolumeDescriptio } // Describes a DNS entry. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DnsEntry type DnsEntry struct { _ struct{} `type:"structure"` @@ -43657,7 +44997,6 @@ func (s *DnsEntry) SetHostedZoneId(v string) *DnsEntry { } // Describes a block device for an EBS volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice type EbsBlockDevice struct { _ struct{} `type:"structure"` @@ -43685,10 +45024,11 @@ type EbsBlockDevice struct { // it is not used in requests to create gp2, st1, sc1, or standard volumes. Iops *int64 `locationName:"iops" type:"integer"` - // ID for a user-managed CMK under which the EBS volume is encrypted. + // Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK + // under which the EBS volume is encrypted. // - // Note: This parameter is only supported on BlockDeviceMapping objects called - // by RunInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + // This parameter is only supported on BlockDeviceMapping objects called by + // RunInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), // RequestSpotFleet (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html), // and RequestSpotInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html). KmsKeyId *string `type:"string"` @@ -43767,12 +45107,11 @@ func (s *EbsBlockDevice) SetVolumeType(v string) *EbsBlockDevice { } // Describes a parameter used to set up an EBS volume in a block device mapping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsInstanceBlockDevice type EbsInstanceBlockDevice struct { _ struct{} `type:"structure"` // The time stamp when the attachment initiated. - AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` // Indicates whether the volume is deleted on instance termination. DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` @@ -43820,7 +45159,6 @@ func (s *EbsInstanceBlockDevice) SetVolumeId(v string) *EbsInstanceBlockDevice { // Describes information used to set up an EBS volume specified in a block device // mapping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsInstanceBlockDeviceSpecification type EbsInstanceBlockDeviceSpecification struct { _ struct{} `type:"structure"` @@ -43853,15 +45191,14 @@ func (s *EbsInstanceBlockDeviceSpecification) SetVolumeId(v string) *EbsInstance return s } -// Describes an egress-only Internet gateway. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EgressOnlyInternetGateway +// Describes an egress-only internet gateway. type EgressOnlyInternetGateway struct { _ struct{} `type:"structure"` - // Information about the attachment of the egress-only Internet gateway. + // Information about the attachment of the egress-only internet gateway. Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` - // The ID of the egress-only Internet gateway. + // The ID of the egress-only internet gateway. EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` } @@ -43888,7 +45225,6 @@ func (s *EgressOnlyInternetGateway) SetEgressOnlyInternetGatewayId(v string) *Eg } // Describes the association between an instance and an Elastic GPU. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuAssociation type ElasticGpuAssociation struct { _ struct{} `type:"structure"` @@ -43940,7 +45276,6 @@ func (s *ElasticGpuAssociation) SetElasticGpuId(v string) *ElasticGpuAssociation } // Describes the status of an Elastic GPU. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuHealth type ElasticGpuHealth struct { _ struct{} `type:"structure"` @@ -43965,7 +45300,6 @@ func (s *ElasticGpuHealth) SetStatus(v string) *ElasticGpuHealth { } // A specification for an Elastic GPU. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuSpecification type ElasticGpuSpecification struct { _ struct{} `type:"structure"` @@ -44005,7 +45339,6 @@ func (s *ElasticGpuSpecification) SetType(v string) *ElasticGpuSpecification { } // Describes an elastic GPU. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuSpecificationResponse type ElasticGpuSpecificationResponse struct { _ struct{} `type:"structure"` @@ -44030,7 +45363,6 @@ func (s *ElasticGpuSpecificationResponse) SetType(v string) *ElasticGpuSpecifica } // Describes an Elastic GPU. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpus type ElasticGpus struct { _ struct{} `type:"structure"` @@ -44100,7 +45432,6 @@ func (s *ElasticGpus) SetInstanceId(v string) *ElasticGpus { } // Contains the parameters for EnableVgwRoutePropagation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVgwRoutePropagationRequest type EnableVgwRoutePropagationInput struct { _ struct{} `type:"structure"` @@ -44153,7 +45484,6 @@ func (s *EnableVgwRoutePropagationInput) SetRouteTableId(v string) *EnableVgwRou return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVgwRoutePropagationOutput type EnableVgwRoutePropagationOutput struct { _ struct{} `type:"structure"` } @@ -44169,7 +45499,6 @@ func (s EnableVgwRoutePropagationOutput) GoString() string { } // Contains the parameters for EnableVolumeIO. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVolumeIORequest type EnableVolumeIOInput struct { _ struct{} `type:"structure"` @@ -44220,7 +45549,6 @@ func (s *EnableVolumeIOInput) SetVolumeId(v string) *EnableVolumeIOInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVolumeIOOutput type EnableVolumeIOOutput struct { _ struct{} `type:"structure"` } @@ -44236,7 +45564,6 @@ func (s EnableVolumeIOOutput) GoString() string { } // Contains the parameters for EnableVpcClassicLinkDnsSupport. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkDnsSupportRequest type EnableVpcClassicLinkDnsSupportInput struct { _ struct{} `type:"structure"` @@ -44261,7 +45588,6 @@ func (s *EnableVpcClassicLinkDnsSupportInput) SetVpcId(v string) *EnableVpcClass } // Contains the output of EnableVpcClassicLinkDnsSupport. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkDnsSupportResult type EnableVpcClassicLinkDnsSupportOutput struct { _ struct{} `type:"structure"` @@ -44286,7 +45612,6 @@ func (s *EnableVpcClassicLinkDnsSupportOutput) SetReturn(v bool) *EnableVpcClass } // Contains the parameters for EnableVpcClassicLink. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkRequest type EnableVpcClassicLinkInput struct { _ struct{} `type:"structure"` @@ -44338,7 +45663,6 @@ func (s *EnableVpcClassicLinkInput) SetVpcId(v string) *EnableVpcClassicLinkInpu } // Contains the output of EnableVpcClassicLink. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkResult type EnableVpcClassicLinkOutput struct { _ struct{} `type:"structure"` @@ -44363,7 +45687,6 @@ func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutp } // Describes a Spot Fleet event. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EventInformation type EventInformation struct { _ struct{} `type:"structure"` @@ -44395,9 +45718,9 @@ type EventInformation struct { // * cancelled - The Spot Fleet is canceled and has no running Spot Instances. // The Spot Fleet will be deleted two days after its instances were terminated. // - // * cancelled_running - The Spot Fleet is canceled and will not launch additional - // Spot Instances, but its existing Spot Instances continue to run until - // they are interrupted or terminated. + // * cancelled_running - The Spot Fleet is canceled and does not launch additional + // Spot Instances. Existing Spot Instances continue to run until they are + // interrupted or terminated. // // * cancelled_terminating - The Spot Fleet is canceled and its Spot Instances // are terminating. @@ -44467,7 +45790,6 @@ func (s *EventInformation) SetInstanceId(v string) *EventInformation { } // Describes an instance export task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportTask type ExportTask struct { _ struct{} `type:"structure"` @@ -44537,7 +45859,6 @@ func (s *ExportTask) SetStatusMessage(v string) *ExportTask { } // Describes the format and location for an instance export task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportToS3Task type ExportToS3Task struct { _ struct{} `type:"structure"` @@ -44591,7 +45912,6 @@ func (s *ExportToS3Task) SetS3Key(v string) *ExportToS3Task { } // Describes an instance export task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportToS3TaskSpecification type ExportToS3TaskSpecification struct { _ struct{} `type:"structure"` @@ -44646,9 +45966,30 @@ func (s *ExportToS3TaskSpecification) SetS3Prefix(v string) *ExportToS3TaskSpeci } // A filter name and value pair that is used to return a more specific list -// of results. Filters can be used to match a set of resources by various criteria, -// such as tags, attributes, or IDs. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Filter +// of results from a describe operation. 
Filters can be used to match a set +// of resources by specific criteria, such as tags, attributes, or IDs. The +// filters supported by a describe operation are documented with the describe +// operation. For example: +// +// * DescribeAvailabilityZones +// +// * DescribeImages +// +// * DescribeInstances +// +// * DescribeKeyPairs +// +// * DescribeSecurityGroups +// +// * DescribeSnapshots +// +// * DescribeSubnets +// +// * DescribeTags +// +// * DescribeVolumes +// +// * DescribeVpcs type Filter struct { _ struct{} `type:"structure"` @@ -44681,8 +46022,441 @@ func (s *Filter) SetValues(v []*string) *Filter { return s } +// Describes an EC2 Fleet. +type FleetData struct { + _ struct{} `type:"structure"` + + // The progress of the EC2 Fleet. If there is an error, the status is error. + // After all requests are placed, the status is pending_fulfillment. If the + // size of the EC2 Fleet is equal to or greater than its target capacity, the + // status is fulfilled. If the size of the EC2 Fleet is decreased, the status + // is pending_termination while instances are terminating. + ActivityStatus *string `locationName:"activityStatus" type:"string" enum:"FleetActivityStatus"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string"` + + // The creation date and time of the EC2 Fleet. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // Indicates whether running instances should be terminated if the target capacity + // of the EC2 Fleet is decreased below the current size of the EC2 Fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"FleetExcessCapacityTerminationPolicy"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // The state of the EC2 Fleet. + FleetState *string `locationName:"fleetState" type:"string" enum:"FleetStateCode"` + + // The number of units fulfilled by this request compared to the set target + // capacity. + FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` + + // The number of units fulfilled by this request compared to the set target + // On-Demand capacity. + FulfilledOnDemandCapacity *float64 `locationName:"fulfilledOnDemandCapacity" type:"double"` + + // The launch template and overrides. + LaunchTemplateConfigs []*FleetLaunchTemplateConfig `locationName:"launchTemplateConfigs" locationNameList:"item" type:"list"` + + // The allocation strategy of On-Demand Instances in an EC2 Fleet. + OnDemandOptions *OnDemandOptions `locationName:"onDemandOptions" type:"structure"` + + // Indicates whether EC2 Fleet should replace unhealthy instances. + ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` + + // The configuration of Spot Instances in an EC2 Fleet. + SpotOptions *SpotOptions `locationName:"spotOptions" type:"structure"` + + // The tags for an EC2 Fleet resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The number of units to request. You can choose to set the target capacity + // in terms of instances or a performance characteristic that is important to + // your application workload, such as vCPUs, memory, or I/O. 
If the request + // type is maintain, you can specify a target capacity of 0 and add capacity + // later. + TargetCapacitySpecification *TargetCapacitySpecification `locationName:"targetCapacitySpecification" type:"structure"` + + // Indicates whether running instances should be terminated when the EC2 Fleet + // expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The type of request. Indicates whether the EC2 Fleet only requests the target + // capacity, or also attempts to maintain it. If you request a certain target + // capacity, EC2 Fleet only places the required requests; it does not attempt + // to replenish instances if capacity is diminished, and does not submit requests + // in alternative capacity pools if capacity is unavailable. To maintain a certain + // target capacity, EC2 Fleet places the required requests to meet this target + // capacity. It also automatically replenishes any interrupted Spot Instances. + // Default: maintain. + Type *string `locationName:"type" type:"string" enum:"FleetType"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new instance requests are placed or able to fulfill the + // request. The default end date is 7 days from the current date. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` +} + +// String returns the string representation +func (s FleetData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetData) GoString() string { + return s.String() +} + +// SetActivityStatus sets the ActivityStatus field's value. +func (s *FleetData) SetActivityStatus(v string) *FleetData { + s.ActivityStatus = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *FleetData) SetClientToken(v string) *FleetData { + s.ClientToken = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *FleetData) SetCreateTime(v time.Time) *FleetData { + s.CreateTime = &v + return s +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *FleetData) SetExcessCapacityTerminationPolicy(v string) *FleetData { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *FleetData) SetFleetId(v string) *FleetData { + s.FleetId = &v + return s +} + +// SetFleetState sets the FleetState field's value. +func (s *FleetData) SetFleetState(v string) *FleetData { + s.FleetState = &v + return s +} + +// SetFulfilledCapacity sets the FulfilledCapacity field's value. +func (s *FleetData) SetFulfilledCapacity(v float64) *FleetData { + s.FulfilledCapacity = &v + return s +} + +// SetFulfilledOnDemandCapacity sets the FulfilledOnDemandCapacity field's value. +func (s *FleetData) SetFulfilledOnDemandCapacity(v float64) *FleetData { + s.FulfilledOnDemandCapacity = &v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *FleetData) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfig) *FleetData { + s.LaunchTemplateConfigs = v + return s +} + +// SetOnDemandOptions sets the OnDemandOptions field's value. 
+func (s *FleetData) SetOnDemandOptions(v *OnDemandOptions) *FleetData { + s.OnDemandOptions = v + return s +} + +// SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. +func (s *FleetData) SetReplaceUnhealthyInstances(v bool) *FleetData { + s.ReplaceUnhealthyInstances = &v + return s +} + +// SetSpotOptions sets the SpotOptions field's value. +func (s *FleetData) SetSpotOptions(v *SpotOptions) *FleetData { + s.SpotOptions = v + return s +} + +// SetTags sets the Tags field's value. +func (s *FleetData) SetTags(v []*Tag) *FleetData { + s.Tags = v + return s +} + +// SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. +func (s *FleetData) SetTargetCapacitySpecification(v *TargetCapacitySpecification) *FleetData { + s.TargetCapacitySpecification = v + return s +} + +// SetTerminateInstancesWithExpiration sets the TerminateInstancesWithExpiration field's value. +func (s *FleetData) SetTerminateInstancesWithExpiration(v bool) *FleetData { + s.TerminateInstancesWithExpiration = &v + return s +} + +// SetType sets the Type field's value. +func (s *FleetData) SetType(v string) *FleetData { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *FleetData) SetValidFrom(v time.Time) *FleetData { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *FleetData) SetValidUntil(v time.Time) *FleetData { + s.ValidUntil = &v + return s +} + +// Describes a launch template and overrides. +type FleetLaunchTemplateConfig struct { + _ struct{} `type:"structure"` + + // The launch template. + LaunchTemplateSpecification *FleetLaunchTemplateSpecification `locationName:"launchTemplateSpecification" type:"structure"` + + // Any parameters that you specify override the same parameters in the launch + // template. + Overrides []*FleetLaunchTemplateOverrides `locationName:"overrides" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FleetLaunchTemplateConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateConfig) GoString() string { + return s.String() +} + +// SetLaunchTemplateSpecification sets the LaunchTemplateSpecification field's value. +func (s *FleetLaunchTemplateConfig) SetLaunchTemplateSpecification(v *FleetLaunchTemplateSpecification) *FleetLaunchTemplateConfig { + s.LaunchTemplateSpecification = v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *FleetLaunchTemplateConfig) SetOverrides(v []*FleetLaunchTemplateOverrides) *FleetLaunchTemplateConfig { + s.Overrides = v + return s +} + +// Describes a launch template and overrides. +type FleetLaunchTemplateConfigRequest struct { + _ struct{} `type:"structure"` + + // The launch template to use. You must specify either the launch template ID + // or launch template name in the request. + LaunchTemplateSpecification *FleetLaunchTemplateSpecificationRequest `type:"structure"` + + // Any parameters that you specify override the same parameters in the launch + // template. 
+ Overrides []*FleetLaunchTemplateOverridesRequest `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FleetLaunchTemplateConfigRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateConfigRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FleetLaunchTemplateConfigRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FleetLaunchTemplateConfigRequest"} + if s.LaunchTemplateSpecification != nil { + if err := s.LaunchTemplateSpecification.Validate(); err != nil { + invalidParams.AddNested("LaunchTemplateSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateSpecification sets the LaunchTemplateSpecification field's value. +func (s *FleetLaunchTemplateConfigRequest) SetLaunchTemplateSpecification(v *FleetLaunchTemplateSpecificationRequest) *FleetLaunchTemplateConfigRequest { + s.LaunchTemplateSpecification = v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *FleetLaunchTemplateConfigRequest) SetOverrides(v []*FleetLaunchTemplateOverridesRequest) *FleetLaunchTemplateConfigRequest { + s.Overrides = v + return s +} + +// Describes overrides for a launch template. +type FleetLaunchTemplateOverrides struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to launch the instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The maximum price per unit hour that you are willing to pay for a Spot Instance. + MaxPrice *string `locationName:"maxPrice" type:"string"` + + // The priority for the launch template override. If AllocationStrategy is set + // to prioritized, EC2 Fleet uses priority to determine which launch template + // override to use first in fulfilling On-Demand capacity. The highest priority + // is launched first. Valid values are whole numbers starting at 0. The lower + // the number, the higher the priority. If no number is set, the override has + // the lowest priority. + Priority *float64 `locationName:"priority" type:"double"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The number of units provided by the specified instance type. + WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` +} + +// String returns the string representation +func (s FleetLaunchTemplateOverrides) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateOverrides) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *FleetLaunchTemplateOverrides) SetAvailabilityZone(v string) *FleetLaunchTemplateOverrides { + s.AvailabilityZone = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *FleetLaunchTemplateOverrides) SetInstanceType(v string) *FleetLaunchTemplateOverrides { + s.InstanceType = &v + return s +} + +// SetMaxPrice sets the MaxPrice field's value. 
+func (s *FleetLaunchTemplateOverrides) SetMaxPrice(v string) *FleetLaunchTemplateOverrides { + s.MaxPrice = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *FleetLaunchTemplateOverrides) SetPriority(v float64) *FleetLaunchTemplateOverrides { + s.Priority = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *FleetLaunchTemplateOverrides) SetSubnetId(v string) *FleetLaunchTemplateOverrides { + s.SubnetId = &v + return s +} + +// SetWeightedCapacity sets the WeightedCapacity field's value. +func (s *FleetLaunchTemplateOverrides) SetWeightedCapacity(v float64) *FleetLaunchTemplateOverrides { + s.WeightedCapacity = &v + return s +} + +// Describes overrides for a launch template. +type FleetLaunchTemplateOverridesRequest struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to launch the instances. + AvailabilityZone *string `type:"string"` + + // The instance type. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum price per unit hour that you are willing to pay for a Spot Instance. + MaxPrice *string `type:"string"` + + // The priority for the launch template override. If AllocationStrategy is set + // to prioritized, EC2 Fleet uses priority to determine which launch template + // override to use first in fulfilling On-Demand capacity. The highest priority + // is launched first. Valid values are whole numbers starting at 0. The lower + // the number, the higher the priority. If no number is set, the launch template + // override has the lowest priority. + Priority *float64 `type:"double"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `type:"string"` + + // The number of units provided by the specified instance type. + WeightedCapacity *float64 `type:"double"` +} + +// String returns the string representation +func (s FleetLaunchTemplateOverridesRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateOverridesRequest) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetAvailabilityZone(v string) *FleetLaunchTemplateOverridesRequest { + s.AvailabilityZone = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetInstanceType(v string) *FleetLaunchTemplateOverridesRequest { + s.InstanceType = &v + return s +} + +// SetMaxPrice sets the MaxPrice field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetMaxPrice(v string) *FleetLaunchTemplateOverridesRequest { + s.MaxPrice = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetPriority(v float64) *FleetLaunchTemplateOverridesRequest { + s.Priority = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetSubnetId(v string) *FleetLaunchTemplateOverridesRequest { + s.SubnetId = &v + return s +} + +// SetWeightedCapacity sets the WeightedCapacity field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetWeightedCapacity(v float64) *FleetLaunchTemplateOverridesRequest { + s.WeightedCapacity = &v + return s +} + // Describes a launch template. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FleetLaunchTemplateSpecification type FleetLaunchTemplateSpecification struct { _ struct{} `type:"structure"` @@ -44740,20 +46514,75 @@ func (s *FleetLaunchTemplateSpecification) SetVersion(v string) *FleetLaunchTemp return s } +// The launch template to use. You must specify either the launch template ID +// or launch template name in the request. +type FleetLaunchTemplateSpecificationRequest struct { + _ struct{} `type:"structure"` + + // The ID of the launch template. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. + LaunchTemplateName *string `min:"3" type:"string"` + + // The version number of the launch template. + Version *string `type:"string"` +} + +// String returns the string representation +func (s FleetLaunchTemplateSpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateSpecificationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FleetLaunchTemplateSpecificationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FleetLaunchTemplateSpecificationRequest"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *FleetLaunchTemplateSpecificationRequest) SetLaunchTemplateId(v string) *FleetLaunchTemplateSpecificationRequest { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *FleetLaunchTemplateSpecificationRequest) SetLaunchTemplateName(v string) *FleetLaunchTemplateSpecificationRequest { + s.LaunchTemplateName = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *FleetLaunchTemplateSpecificationRequest) SetVersion(v string) *FleetLaunchTemplateSpecificationRequest { + s.Version = &v + return s +} + // Describes a flow log. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FlowLog type FlowLog struct { _ struct{} `type:"structure"` // The date and time the flow log was created. - CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` // Information about the error that occurred. Rate limited indicates that CloudWatch - // logs throttling has been applied for one or more network interfaces, or that - // you've reached the limit on the number of CloudWatch Logs log groups that - // you can create. Access error indicates that the IAM role associated with - // the flow log does not have sufficient permissions to publish to CloudWatch - // Logs. Unknown error indicates an internal error. + // Logs throttling has been applied for one or more network interfaces, or that + // you've reached the limit on the number of log groups that you can create. + // Access error indicates that the IAM role associated with the flow log does + // not have sufficient permissions to publish to CloudWatch Logs. Unknown error + // indicates an internal error. DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"` // The ARN of the IAM role that posts logs to CloudWatch Logs. 
@@ -44768,6 +46597,18 @@ type FlowLog struct { // The status of the flow log (ACTIVE). FlowLogStatus *string `locationName:"flowLogStatus" type:"string"` + // Specifies the destination to which the flow log data is published. Flow log + // data can be published to an CloudWatch Logs log group or an Amazon S3 bucket. + // If the flow log publishes to CloudWatch Logs, this element indicates the + // Amazon Resource Name (ARN) of the CloudWatch Logs log group to which the + // data is published. If the flow log publishes to Amazon S3, this element indicates + // the ARN of the Amazon S3 bucket to which the data is published. + LogDestination *string `locationName:"logDestination" type:"string"` + + // Specifies the type of destination to which the flow log data is published. + // Flow log data can be published to CloudWatch Logs or Amazon S3. + LogDestinationType *string `locationName:"logDestinationType" type:"string" enum:"LogDestinationType"` + // The name of the flow log group. LogGroupName *string `locationName:"logGroupName" type:"string"` @@ -44824,6 +46665,18 @@ func (s *FlowLog) SetFlowLogStatus(v string) *FlowLog { return s } +// SetLogDestination sets the LogDestination field's value. +func (s *FlowLog) SetLogDestination(v string) *FlowLog { + s.LogDestination = &v + return s +} + +// SetLogDestinationType sets the LogDestinationType field's value. +func (s *FlowLog) SetLogDestinationType(v string) *FlowLog { + s.LogDestinationType = &v + return s +} + // SetLogGroupName sets the LogGroupName field's value. func (s *FlowLog) SetLogGroupName(v string) *FlowLog { s.LogGroupName = &v @@ -44843,12 +46696,11 @@ func (s *FlowLog) SetTrafficType(v string) *FlowLog { } // Describes an Amazon FPGA image (AFI). -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImage type FpgaImage struct { _ struct{} `type:"structure"` // The date and time the AFI was created. - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` // The description of the AFI. Description *string `locationName:"description" type:"string"` @@ -44887,7 +46739,7 @@ type FpgaImage struct { Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"` // The time of the most recent update to the AFI. - UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp"` } // String returns the string representation @@ -44985,7 +46837,6 @@ func (s *FpgaImage) SetUpdateTime(v time.Time) *FpgaImage { } // Describes an Amazon FPGA image (AFI) attribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageAttribute type FpgaImageAttribute struct { _ struct{} `type:"structure"` @@ -45047,7 +46898,6 @@ func (s *FpgaImageAttribute) SetProductCodes(v []*ProductCode) *FpgaImageAttribu // Describes the state of the bitstream generation process for an Amazon FPGA // image (AFI). -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageState type FpgaImageState struct { _ struct{} `type:"structure"` @@ -45089,7 +46939,6 @@ func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { } // Contains the parameters for GetConsoleOutput. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutputRequest type GetConsoleOutputInput struct { _ struct{} `type:"structure"` @@ -45103,6 +46952,11 @@ type GetConsoleOutputInput struct { // // InstanceId is a required field InstanceId *string `type:"string" required:"true"` + + // When enabled, retrieves the latest console output for the instance. + // + // Default: disabled (false) + Latest *bool `type:"boolean"` } // String returns the string representation @@ -45140,20 +46994,25 @@ func (s *GetConsoleOutputInput) SetInstanceId(v string) *GetConsoleOutputInput { return s } +// SetLatest sets the Latest field's value. +func (s *GetConsoleOutputInput) SetLatest(v bool) *GetConsoleOutputInput { + s.Latest = &v + return s +} + // Contains the output of GetConsoleOutput. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutputResult type GetConsoleOutputOutput struct { _ struct{} `type:"structure"` // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` - // The console output, Base64-encoded. If using a command line tool, the tool - // decodes the output for you. + // The console output, base64-encoded. If you are using a command line tool, + // the tool decodes the output for you. Output *string `locationName:"output" type:"string"` - // The time the output was last updated. - Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + // The time at which the output was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` } // String returns the string representation @@ -45185,7 +47044,6 @@ func (s *GetConsoleOutputOutput) SetTimestamp(v time.Time) *GetConsoleOutputOutp } // Contains the parameters for the request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleScreenshotRequest type GetConsoleScreenshotInput struct { _ struct{} `type:"structure"` @@ -45247,7 +47105,6 @@ func (s *GetConsoleScreenshotInput) SetWakeUp(v bool) *GetConsoleScreenshotInput } // Contains the output of the request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleScreenshotResult type GetConsoleScreenshotOutput struct { _ struct{} `type:"structure"` @@ -45280,12 +47137,10 @@ func (s *GetConsoleScreenshotOutput) SetInstanceId(v string) *GetConsoleScreensh return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetHostReservationPurchasePreviewRequest type GetHostReservationPurchasePreviewInput struct { _ struct{} `type:"structure"` - // The ID/s of the Dedicated Host/s that the reservation will be associated - // with. + // The IDs of the Dedicated Hosts with which the reservation is associated. // // HostIdSet is a required field HostIdSet []*string `locationNameList:"item" type:"list" required:"true"` @@ -45334,7 +47189,6 @@ func (s *GetHostReservationPurchasePreviewInput) SetOfferingId(v string) *GetHos return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetHostReservationPurchasePreviewResult type GetHostReservationPurchasePreviewOutput struct { _ struct{} `type:"structure"` @@ -45342,7 +47196,7 @@ type GetHostReservationPurchasePreviewOutput struct { // are specified. At this time, the only supported currency is USD. 
CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` - // The purchase information of the Dedicated Host Reservation and the Dedicated + // The purchase information of the Dedicated Host reservation and the Dedicated // Hosts associated with it. Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` @@ -45387,7 +47241,6 @@ func (s *GetHostReservationPurchasePreviewOutput) SetTotalUpfrontPrice(v string) return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetLaunchTemplateDataRequest type GetLaunchTemplateDataInput struct { _ struct{} `type:"structure"` @@ -45438,7 +47291,6 @@ func (s *GetLaunchTemplateDataInput) SetInstanceId(v string) *GetLaunchTemplateD return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetLaunchTemplateDataResult type GetLaunchTemplateDataOutput struct { _ struct{} `type:"structure"` @@ -45463,7 +47315,6 @@ func (s *GetLaunchTemplateDataOutput) SetLaunchTemplateData(v *ResponseLaunchTem } // Contains the parameters for GetPasswordData. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordDataRequest type GetPasswordDataInput struct { _ struct{} `type:"structure"` @@ -45515,7 +47366,6 @@ func (s *GetPasswordDataInput) SetInstanceId(v string) *GetPasswordDataInput { } // Contains the output of GetPasswordData. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordDataResult type GetPasswordDataOutput struct { _ struct{} `type:"structure"` @@ -45527,7 +47377,7 @@ type GetPasswordDataOutput struct { PasswordData *string `locationName:"passwordData" type:"string"` // The time the data was last updated. - Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` } // String returns the string representation @@ -45559,7 +47409,6 @@ func (s *GetPasswordDataOutput) SetTimestamp(v time.Time) *GetPasswordDataOutput } // Contains the parameters for GetReservedInstanceExchangeQuote. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetReservedInstancesExchangeQuoteRequest type GetReservedInstancesExchangeQuoteInput struct { _ struct{} `type:"structure"` @@ -45631,7 +47480,6 @@ func (s *GetReservedInstancesExchangeQuoteInput) SetTargetConfigurations(v []*Ta } // Contains the output of GetReservedInstancesExchangeQuote. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetReservedInstancesExchangeQuoteResult type GetReservedInstancesExchangeQuoteOutput struct { _ struct{} `type:"structure"` @@ -45642,7 +47490,7 @@ type GetReservedInstancesExchangeQuoteOutput struct { IsValidExchange *bool `locationName:"isValidExchange" type:"boolean"` // The new end date of the reservation term. - OutputReservedInstancesWillExpireAt *time.Time `locationName:"outputReservedInstancesWillExpireAt" type:"timestamp" timestampFormat:"iso8601"` + OutputReservedInstancesWillExpireAt *time.Time `locationName:"outputReservedInstancesWillExpireAt" type:"timestamp"` // The total true upfront charge for the exchange. PaymentDue *string `locationName:"paymentDue" type:"string"` @@ -45728,7 +47576,6 @@ func (s *GetReservedInstancesExchangeQuoteOutput) SetValidationFailureReason(v s } // Describes a security group. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GroupIdentifier type GroupIdentifier struct { _ struct{} `type:"structure"` @@ -45762,7 +47609,6 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { } // Describes an event in the history of the Spot Fleet request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/HistoryRecord type HistoryRecord struct { _ struct{} `type:"structure"` @@ -45788,7 +47634,7 @@ type HistoryRecord struct { // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // // Timestamp is a required field - Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601" required:"true"` + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" required:"true"` } // String returns the string representation @@ -45819,11 +47665,55 @@ func (s *HistoryRecord) SetTimestamp(v time.Time) *HistoryRecord { return s } +// Describes an event in the history of an EC2 Fleet. +type HistoryRecordEntry struct { + _ struct{} `type:"structure"` + + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure"` + + // The event type. + EventType *string `locationName:"eventType" type:"string" enum:"FleetEventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s HistoryRecordEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryRecordEntry) GoString() string { + return s.String() +} + +// SetEventInformation sets the EventInformation field's value. +func (s *HistoryRecordEntry) SetEventInformation(v *EventInformation) *HistoryRecordEntry { + s.EventInformation = v + return s +} + +// SetEventType sets the EventType field's value. +func (s *HistoryRecordEntry) SetEventType(v string) *HistoryRecordEntry { + s.EventType = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *HistoryRecordEntry) SetTimestamp(v time.Time) *HistoryRecordEntry { + s.Timestamp = &v + return s +} + // Describes the properties of the Dedicated Host. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Host type Host struct { _ struct{} `type:"structure"` + // The time that the Dedicated Host was allocated. + AllocationTime *time.Time `locationName:"allocationTime" type:"timestamp"` + // Whether auto-placement is on or off. AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` @@ -45833,8 +47723,8 @@ type Host struct { // The number of new instances that can be launched onto the Dedicated Host. AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"` - // Unique, case-sensitive identifier you provide to ensure idempotency of the - // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // Unique, case-sensitive identifier that you provide to ensure idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) // in the Amazon Elastic Compute Cloud User Guide. 
ClientToken *string `locationName:"clientToken" type:"string"` @@ -45851,8 +47741,14 @@ type Host struct { // The IDs and instance type that are currently running on the Dedicated Host. Instances []*HostInstance `locationName:"instances" locationNameList:"item" type:"list"` + // The time that the Dedicated Host was released. + ReleaseTime *time.Time `locationName:"releaseTime" type:"timestamp"` + // The Dedicated Host's state. State *string `locationName:"state" type:"string" enum:"AllocationState"` + + // Any tags assigned to the Dedicated Host. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -45865,6 +47761,12 @@ func (s Host) GoString() string { return s.String() } +// SetAllocationTime sets the AllocationTime field's value. +func (s *Host) SetAllocationTime(v time.Time) *Host { + s.AllocationTime = &v + return s +} + // SetAutoPlacement sets the AutoPlacement field's value. func (s *Host) SetAutoPlacement(v string) *Host { s.AutoPlacement = &v @@ -45913,14 +47815,25 @@ func (s *Host) SetInstances(v []*HostInstance) *Host { return s } +// SetReleaseTime sets the ReleaseTime field's value. +func (s *Host) SetReleaseTime(v time.Time) *Host { + s.ReleaseTime = &v + return s +} + // SetState sets the State field's value. func (s *Host) SetState(v string) *Host { s.State = &v return s } +// SetTags sets the Tags field's value. +func (s *Host) SetTags(v []*Tag) *Host { + s.Tags = v + return s +} + // Describes an instance running on a Dedicated Host. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/HostInstance type HostInstance struct { _ struct{} `type:"structure"` @@ -45954,7 +47867,6 @@ func (s *HostInstance) SetInstanceType(v string) *HostInstance { } // Details about the Dedicated Host Reservation offering. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/HostOffering type HostOffering struct { _ struct{} `type:"structure"` @@ -46033,7 +47945,6 @@ func (s *HostOffering) SetUpfrontPrice(v string) *HostOffering { } // Describes properties of a Dedicated Host. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/HostProperties type HostProperties struct { _ struct{} `type:"structure"` @@ -46085,7 +47996,6 @@ func (s *HostProperties) SetTotalVCpus(v int64) *HostProperties { } // Details about the Dedicated Host Reservation and associated Dedicated Hosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/HostReservation type HostReservation struct { _ struct{} `type:"structure"` @@ -46101,7 +48011,7 @@ type HostReservation struct { Duration *int64 `locationName:"duration" type:"integer"` // The date and time that the reservation ends. - End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` + End *time.Time `locationName:"end" type:"timestamp"` // The IDs of the Dedicated Hosts associated with the reservation. HostIdSet []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` @@ -46125,7 +48035,7 @@ type HostReservation struct { PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"` // The date and time that the reservation started. - Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` + Start *time.Time `locationName:"start" type:"timestamp"` // The state of the reservation. 
State *string `locationName:"state" type:"string" enum:"ReservationState"` @@ -46223,7 +48133,6 @@ func (s *HostReservation) SetUpfrontPrice(v string) *HostReservation { } // Describes an IAM instance profile. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IamInstanceProfile type IamInstanceProfile struct { _ struct{} `type:"structure"` @@ -46257,7 +48166,6 @@ func (s *IamInstanceProfile) SetId(v string) *IamInstanceProfile { } // Describes an association between an IAM instance profile and an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IamInstanceProfileAssociation type IamInstanceProfileAssociation struct { _ struct{} `type:"structure"` @@ -46274,7 +48182,7 @@ type IamInstanceProfileAssociation struct { State *string `locationName:"state" type:"string" enum:"IamInstanceProfileAssociationState"` // The time the IAM instance profile was associated with the instance. - Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` } // String returns the string representation @@ -46318,7 +48226,6 @@ func (s *IamInstanceProfileAssociation) SetTimestamp(v time.Time) *IamInstancePr } // Describes an IAM instance profile. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IamInstanceProfileSpecification type IamInstanceProfileSpecification struct { _ struct{} `type:"structure"` @@ -46352,7 +48259,6 @@ func (s *IamInstanceProfileSpecification) SetName(v string) *IamInstanceProfileS } // Describes the ICMP type and code. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IcmpTypeCode type IcmpTypeCode struct { _ struct{} `type:"structure"` @@ -46386,14 +48292,13 @@ func (s *IcmpTypeCode) SetType(v int64) *IcmpTypeCode { } // Describes the ID format for a resource. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IdFormat type IdFormat struct { _ struct{} `type:"structure"` // The date in UTC at which you are permanently switched over to using longer // IDs. If a deadline is not yet available for this resource type, this field // is not returned. - Deadline *time.Time `locationName:"deadline" type:"timestamp" timestampFormat:"iso8601"` + Deadline *time.Time `locationName:"deadline" type:"timestamp"` // The type of resource. Resource *string `locationName:"resource" type:"string"` @@ -46431,7 +48336,6 @@ func (s *IdFormat) SetUseLongIds(v bool) *IdFormat { } // Describes an image. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Image type Image struct { _ struct{} `type:"structure"` @@ -46671,7 +48575,6 @@ func (s *Image) SetVirtualizationType(v string) *Image { } // Describes the disk container object for an import image task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImageDiskContainer type ImageDiskContainer struct { _ struct{} `type:"structure"` @@ -46683,7 +48586,7 @@ type ImageDiskContainer struct { // The format of the disk image being imported. // - // Valid values: RAW | VHD | VMDK | OVA + // Valid values: VHD | VMDK | OVA Format *string `type:"string"` // The ID of the EBS snapshot to be used for importing the snapshot. @@ -46744,7 +48647,6 @@ func (s *ImageDiskContainer) SetUserBucket(v *UserBucket) *ImageDiskContainer { } // Contains the parameters for ImportImage. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportImageRequest type ImportImageInput struct { _ struct{} `type:"structure"` @@ -46866,7 +48768,6 @@ func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput { } // Contains the output for ImportImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportImageResult type ImportImageOutput struct { _ struct{} `type:"structure"` @@ -46981,7 +48882,6 @@ func (s *ImportImageOutput) SetStatusMessage(v string) *ImportImageOutput { } // Describes an import image task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportImageTask type ImportImageTask struct { _ struct{} `type:"structure"` @@ -47100,7 +49000,6 @@ func (s *ImportImageTask) SetStatusMessage(v string) *ImportImageTask { } // Contains the parameters for ImportInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstanceRequest type ImportInstanceInput struct { _ struct{} `type:"structure"` @@ -47189,7 +49088,6 @@ func (s *ImportInstanceInput) SetPlatform(v string) *ImportInstanceInput { } // Describes the launch specification for VM import. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstanceLaunchSpecification type ImportInstanceLaunchSpecification struct { _ struct{} `type:"structure"` @@ -47210,7 +49108,7 @@ type ImportInstanceLaunchSpecification struct { InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` // The instance type. For more information about the instance types that you - // can import, see Instance Types (http://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#vmimport-instance-types) + // can import, see Instance Types (http://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-instance-types) // in the VM Import/Export User Guide. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` @@ -47226,9 +49124,7 @@ type ImportInstanceLaunchSpecification struct { // [EC2-VPC] The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` - // The user data to make available to the instance. If you are using an AWS - // SDK or command line tool, Base64-encoding is performed for you, and you can - // load the text from a file. Otherwise, you must provide Base64-encoded text. + // The Base64-encoded user data to make available to the instance. UserData *UserData `locationName:"userData" type:"structure"` } @@ -47309,7 +49205,6 @@ func (s *ImportInstanceLaunchSpecification) SetUserData(v *UserData) *ImportInst } // Contains the output for ImportInstance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstanceResult type ImportInstanceOutput struct { _ struct{} `type:"structure"` @@ -47334,7 +49229,6 @@ func (s *ImportInstanceOutput) SetConversionTask(v *ConversionTask) *ImportInsta } // Describes an import instance task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstanceTaskDetails type ImportInstanceTaskDetails struct { _ struct{} `type:"structure"` @@ -47348,9 +49242,7 @@ type ImportInstanceTaskDetails struct { Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` // One or more volumes. 
- // - // Volumes is a required field - Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list" required:"true"` + Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list"` } // String returns the string representation @@ -47388,7 +49280,6 @@ func (s *ImportInstanceTaskDetails) SetVolumes(v []*ImportInstanceVolumeDetailIt } // Describes an import volume task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstanceVolumeDetailItem type ImportInstanceVolumeDetailItem struct { _ struct{} `type:"structure"` @@ -47477,7 +49368,6 @@ func (s *ImportInstanceVolumeDetailItem) SetVolume(v *DiskImageVolumeDescription } // Contains the parameters for ImportKeyPair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportKeyPairRequest type ImportKeyPairInput struct { _ struct{} `type:"structure"` @@ -47546,7 +49436,6 @@ func (s *ImportKeyPairInput) SetPublicKeyMaterial(v []byte) *ImportKeyPairInput } // Contains the output of ImportKeyPair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportKeyPairResult type ImportKeyPairOutput struct { _ struct{} `type:"structure"` @@ -47580,7 +49469,6 @@ func (s *ImportKeyPairOutput) SetKeyName(v string) *ImportKeyPairOutput { } // Contains the parameters for ImportSnapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportSnapshotRequest type ImportSnapshotInput struct { _ struct{} `type:"structure"` @@ -47653,7 +49541,6 @@ func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput { } // Contains the output for ImportSnapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportSnapshotResult type ImportSnapshotOutput struct { _ struct{} `type:"structure"` @@ -47696,7 +49583,6 @@ func (s *ImportSnapshotOutput) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *Imp } // Describes an import snapshot task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportSnapshotTask type ImportSnapshotTask struct { _ struct{} `type:"structure"` @@ -47739,7 +49625,6 @@ func (s *ImportSnapshotTask) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *Impor } // Contains the parameters for ImportVolume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportVolumeRequest type ImportVolumeInput struct { _ struct{} `type:"structure"` @@ -47838,7 +49723,6 @@ func (s *ImportVolumeInput) SetVolume(v *VolumeDetail) *ImportVolumeInput { } // Contains the output for ImportVolume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportVolumeResult type ImportVolumeOutput struct { _ struct{} `type:"structure"` @@ -47863,32 +49747,23 @@ func (s *ImportVolumeOutput) SetConversionTask(v *ConversionTask) *ImportVolumeO } // Describes an import volume task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportVolumeTaskDetails type ImportVolumeTaskDetails struct { _ struct{} `type:"structure"` // The Availability Zone where the resulting volume will reside. - // - // AvailabilityZone is a required field - AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The number of bytes converted so far. 
- // - // BytesConverted is a required field - BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + BytesConverted *int64 `locationName:"bytesConverted" type:"long"` // The description you provided when starting the import volume task. Description *string `locationName:"description" type:"string"` // The image. - // - // Image is a required field - Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + Image *DiskImageDescription `locationName:"image" type:"structure"` // The volume. - // - // Volume is a required field - Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure"` } // String returns the string representation @@ -47932,7 +49807,6 @@ func (s *ImportVolumeTaskDetails) SetVolume(v *DiskImageVolumeDescription) *Impo } // Describes an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Instance type Instance struct { _ struct{} `type:"structure"` @@ -47949,6 +49823,9 @@ type Instance struct { // The idempotency token you provided when you launched the instance, if applicable. ClientToken *string `locationName:"clientToken" type:"string"` + // The CPU options for the instance. + CpuOptions *CpuOptions `locationName:"cpuOptions" type:"structure"` + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal I/O performance. This optimization isn't available @@ -47988,7 +49865,7 @@ type Instance struct { KeyName *string `locationName:"keyName" type:"string"` // The time the instance was launched. - LaunchTime *time.Time `locationName:"launchTime" type:"timestamp" timestampFormat:"iso8601"` + LaunchTime *time.Time `locationName:"launchTime" type:"timestamp"` // The monitoring for the instance. Monitoring *Monitoring `locationName:"monitoring" type:"structure"` @@ -48110,6 +49987,12 @@ func (s *Instance) SetClientToken(v string) *Instance { return s } +// SetCpuOptions sets the CpuOptions field's value. +func (s *Instance) SetCpuOptions(v *CpuOptions) *Instance { + s.CpuOptions = v + return s +} + // SetEbsOptimized sets the EbsOptimized field's value. func (s *Instance) SetEbsOptimized(v bool) *Instance { s.EbsOptimized = &v @@ -48321,7 +50204,6 @@ func (s *Instance) SetVpcId(v string) *Instance { } // Describes a block device mapping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceBlockDeviceMapping type InstanceBlockDeviceMapping struct { _ struct{} `type:"structure"` @@ -48356,7 +50238,6 @@ func (s *InstanceBlockDeviceMapping) SetEbs(v *EbsInstanceBlockDevice) *Instance } // Describes a block device mapping entry. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceBlockDeviceMappingSpecification type InstanceBlockDeviceMappingSpecification struct { _ struct{} `type:"structure"` @@ -48409,7 +50290,6 @@ func (s *InstanceBlockDeviceMappingSpecification) SetVirtualName(v string) *Inst } // Information about the instance type that the Dedicated Host supports. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceCapacity type InstanceCapacity struct { _ struct{} `type:"structure"` @@ -48452,7 +50332,6 @@ func (s *InstanceCapacity) SetTotalCapacity(v int64) *InstanceCapacity { } // Describes a Reserved Instance listing state. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceCount type InstanceCount struct { _ struct{} `type:"structure"` @@ -48486,7 +50365,6 @@ func (s *InstanceCount) SetState(v string) *InstanceCount { } // Describes the credit option for CPU usage of a T2 instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceCreditSpecification type InstanceCreditSpecification struct { _ struct{} `type:"structure"` @@ -48521,7 +50399,6 @@ func (s *InstanceCreditSpecification) SetInstanceId(v string) *InstanceCreditSpe } // Describes the credit option for CPU usage of a T2 instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceCreditSpecificationRequest type InstanceCreditSpecificationRequest struct { _ struct{} `type:"structure"` @@ -48556,7 +50433,6 @@ func (s *InstanceCreditSpecificationRequest) SetInstanceId(v string) *InstanceCr } // Describes an instance to export. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceExportDetails type InstanceExportDetails struct { _ struct{} `type:"structure"` @@ -48590,7 +50466,6 @@ func (s *InstanceExportDetails) SetTargetEnvironment(v string) *InstanceExportDe } // Describes an IPv6 address. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceIpv6Address type InstanceIpv6Address struct { _ struct{} `type:"structure"` @@ -48615,7 +50490,6 @@ func (s *InstanceIpv6Address) SetIpv6Address(v string) *InstanceIpv6Address { } // Describes an IPv6 address. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceIpv6AddressRequest type InstanceIpv6AddressRequest struct { _ struct{} `type:"structure"` @@ -48640,7 +50514,6 @@ func (s *InstanceIpv6AddressRequest) SetIpv6Address(v string) *InstanceIpv6Addre } // Describes the market (purchasing) option for the instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceMarketOptionsRequest type InstanceMarketOptionsRequest struct { _ struct{} `type:"structure"` @@ -48674,7 +50547,6 @@ func (s *InstanceMarketOptionsRequest) SetSpotOptions(v *SpotMarketOptions) *Ins } // Describes the monitoring of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceMonitoring type InstanceMonitoring struct { _ struct{} `type:"structure"` @@ -48708,7 +50580,6 @@ func (s *InstanceMonitoring) SetMonitoring(v *Monitoring) *InstanceMonitoring { } // Describes a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceNetworkInterface type InstanceNetworkInterface struct { _ struct{} `type:"structure"` @@ -48860,7 +50731,6 @@ func (s *InstanceNetworkInterface) SetVpcId(v string) *InstanceNetworkInterface } // Describes association information for an Elastic IP address (IPv4). -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceNetworkInterfaceAssociation type InstanceNetworkInterfaceAssociation struct { _ struct{} `type:"structure"` @@ -48903,12 +50773,11 @@ func (s *InstanceNetworkInterfaceAssociation) SetPublicIp(v string) *InstanceNet } // Describes a network interface attachment. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceNetworkInterfaceAttachment type InstanceNetworkInterfaceAttachment struct { _ struct{} `type:"structure"` // The time stamp when the attachment initiated. 
- AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` // The ID of the network interface attachment. AttachmentId *string `locationName:"attachmentId" type:"string"` @@ -48964,7 +50833,6 @@ func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetwor } // Describes a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceNetworkInterfaceSpecification type InstanceNetworkInterfaceSpecification struct { _ struct{} `type:"structure"` @@ -49041,26 +50909,6 @@ func (s InstanceNetworkInterfaceSpecification) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *InstanceNetworkInterfaceSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InstanceNetworkInterfaceSpecification"} - if s.PrivateIpAddresses != nil { - for i, v := range s.PrivateIpAddresses { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *InstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *InstanceNetworkInterfaceSpecification { s.AssociatePublicIpAddress = &v @@ -49134,7 +50982,6 @@ func (s *InstanceNetworkInterfaceSpecification) SetSubnetId(v string) *InstanceN } // Describes a private IPv4 address. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstancePrivateIpAddress type InstancePrivateIpAddress struct { _ struct{} `type:"structure"` @@ -49187,11 +51034,10 @@ func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivat } // Describes the current state of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceState type InstanceState struct { _ struct{} `type:"structure"` - // The low byte represents the state. The high byte is an opaque internal value + // The low byte represents the state. The high byte is used for internal purposes // and should be ignored. // // * 0 : pending @@ -49234,7 +51080,6 @@ func (s *InstanceState) SetName(v string) *InstanceState { } // Describes an instance state change. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceStateChange type InstanceStateChange struct { _ struct{} `type:"structure"` @@ -49277,7 +51122,6 @@ func (s *InstanceStateChange) SetPreviousState(v *InstanceState) *InstanceStateC } // Describes the status of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceStatus type InstanceStatus struct { _ struct{} `type:"structure"` @@ -49351,13 +51195,12 @@ func (s *InstanceStatus) SetSystemStatus(v *InstanceStatusSummary) *InstanceStat } // Describes the instance status. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceStatusDetails type InstanceStatusDetails struct { _ struct{} `type:"structure"` // The time when a status check failed. For an instance that was launched and // impaired, this is the time when the instance was launched. 
- ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp" timestampFormat:"iso8601"` + ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp"` // The type of instance status. Name *string `locationName:"name" type:"string" enum:"StatusName"` @@ -49395,7 +51238,6 @@ func (s *InstanceStatusDetails) SetStatus(v string) *InstanceStatusDetails { } // Describes a scheduled event for an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceStatusEvent type InstanceStatusEvent struct { _ struct{} `type:"structure"` @@ -49410,10 +51252,10 @@ type InstanceStatusEvent struct { Description *string `locationName:"description" type:"string"` // The latest scheduled end time for the event. - NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` // The earliest scheduled start time for the event. - NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` + NotBefore *time.Time `locationName:"notBefore" type:"timestamp"` } // String returns the string representation @@ -49451,7 +51293,6 @@ func (s *InstanceStatusEvent) SetNotBefore(v time.Time) *InstanceStatusEvent { } // Describes the status of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InstanceStatusSummary type InstanceStatusSummary struct { _ struct{} `type:"structure"` @@ -49484,18 +51325,17 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { return s } -// Describes an Internet gateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InternetGateway +// Describes an internet gateway. type InternetGateway struct { _ struct{} `type:"structure"` - // Any VPCs attached to the Internet gateway. + // Any VPCs attached to the internet gateway. Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` - // The ID of the Internet gateway. + // The ID of the internet gateway. InternetGatewayId *string `locationName:"internetGatewayId" type:"string"` - // Any tags assigned to the Internet gateway. + // Any tags assigned to the internet gateway. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -49527,13 +51367,13 @@ func (s *InternetGateway) SetTags(v []*Tag) *InternetGateway { return s } -// Describes the attachment of a VPC to an Internet gateway or an egress-only -// Internet gateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/InternetGatewayAttachment +// Describes the attachment of a VPC to an internet gateway or an egress-only +// internet gateway. type InternetGatewayAttachment struct { _ struct{} `type:"structure"` - // The current state of the attachment. + // The current state of the attachment. For an internet gateway, the state is + // available when attached to a VPC; otherwise, this value is not returned. State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` // The ID of the VPC. @@ -49563,7 +51403,6 @@ func (s *InternetGatewayAttachment) SetVpcId(v string) *InternetGatewayAttachmen } // Describes a set of permissions for a security group rule. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IpPermission type IpPermission struct { _ struct{} `type:"structure"` @@ -49657,7 +51496,6 @@ func (s *IpPermission) SetUserIdGroupPairs(v []*UserIdGroupPair) *IpPermission { } // Describes an IPv4 range. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IpRange type IpRange struct { _ struct{} `type:"structure"` @@ -49696,7 +51534,6 @@ func (s *IpRange) SetDescription(v string) *IpRange { } // Describes an IPv6 CIDR block. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Ipv6CidrBlock type Ipv6CidrBlock struct { _ struct{} `type:"structure"` @@ -49721,7 +51558,6 @@ func (s *Ipv6CidrBlock) SetIpv6CidrBlock(v string) *Ipv6CidrBlock { } // [EC2-VPC only] Describes an IPv6 range. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Ipv6Range type Ipv6Range struct { _ struct{} `type:"structure"` @@ -49760,7 +51596,6 @@ func (s *Ipv6Range) SetDescription(v string) *Ipv6Range { } // Describes a key pair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/KeyPairInfo type KeyPairInfo struct { _ struct{} `type:"structure"` @@ -49797,7 +51632,6 @@ func (s *KeyPairInfo) SetKeyName(v string) *KeyPairInfo { } // Describes a launch permission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchPermission type LaunchPermission struct { _ struct{} `type:"structure"` @@ -49831,7 +51665,6 @@ func (s *LaunchPermission) SetUserId(v string) *LaunchPermission { } // Describes a launch permission modification. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchPermissionModifications type LaunchPermissionModifications struct { _ struct{} `type:"structure"` @@ -49866,7 +51699,6 @@ func (s *LaunchPermissionModifications) SetRemove(v []*LaunchPermission) *Launch } // Describes the launch specification for an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchSpecification type LaunchSpecification struct { _ struct{} `type:"structure"` @@ -49921,9 +51753,7 @@ type LaunchSpecification struct { // The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` - // The user data to make available to the instances. If you are using an AWS - // SDK or command line tool, Base64-encoding is performed for you, and you can - // load the text from a file. Otherwise, you must provide Base64-encoded text. + // The Base64-encoded user data for the instance. UserData *string `locationName:"userData" type:"string"` } @@ -50028,12 +51858,11 @@ func (s *LaunchSpecification) SetUserData(v string) *LaunchSpecification { } // Describes a launch template. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplate type LaunchTemplate struct { _ struct{} `type:"structure"` // The time launch template was created. - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` // The principal that created the launch template. CreatedBy *string `locationName:"createdBy" type:"string"` @@ -50107,7 +51936,6 @@ func (s *LaunchTemplate) SetTags(v []*Tag) *LaunchTemplate { } // Describes a block device mapping. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateBlockDeviceMapping type LaunchTemplateBlockDeviceMapping struct { _ struct{} `type:"structure"` @@ -50160,7 +51988,6 @@ func (s *LaunchTemplateBlockDeviceMapping) SetVirtualName(v string) *LaunchTempl } // Describes a block device mapping. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateBlockDeviceMappingRequest type LaunchTemplateBlockDeviceMappingRequest struct { _ struct{} `type:"structure"` @@ -50218,7 +52045,6 @@ func (s *LaunchTemplateBlockDeviceMappingRequest) SetVirtualName(v string) *Laun } // Describes a launch template and overrides. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateConfig type LaunchTemplateConfig struct { _ struct{} `type:"structure"` @@ -50267,8 +52093,76 @@ func (s *LaunchTemplateConfig) SetOverrides(v []*LaunchTemplateOverrides) *Launc return s } +// The CPU options for the instance. +type LaunchTemplateCpuOptions struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `locationName:"coreCount" type:"integer"` + + // The number of threads per CPU core. + ThreadsPerCore *int64 `locationName:"threadsPerCore" type:"integer"` +} + +// String returns the string representation +func (s LaunchTemplateCpuOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateCpuOptions) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *LaunchTemplateCpuOptions) SetCoreCount(v int64) *LaunchTemplateCpuOptions { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *LaunchTemplateCpuOptions) SetThreadsPerCore(v int64) *LaunchTemplateCpuOptions { + s.ThreadsPerCore = &v + return s +} + +// The CPU options for the instance. Both the core count and threads per core +// must be specified in the request. +type LaunchTemplateCpuOptionsRequest struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `type:"integer"` + + // The number of threads per CPU core. To disable Intel Hyper-Threading Technology + // for the instance, specify a value of 1. Otherwise, specify the default value + // of 2. + ThreadsPerCore *int64 `type:"integer"` +} + +// String returns the string representation +func (s LaunchTemplateCpuOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateCpuOptionsRequest) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *LaunchTemplateCpuOptionsRequest) SetCoreCount(v int64) *LaunchTemplateCpuOptionsRequest { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *LaunchTemplateCpuOptionsRequest) SetThreadsPerCore(v int64) *LaunchTemplateCpuOptionsRequest { + s.ThreadsPerCore = &v + return s +} + // Describes a block device for an EBS volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateEbsBlockDevice type LaunchTemplateEbsBlockDevice struct { _ struct{} `type:"structure"` @@ -50347,7 +52241,6 @@ func (s *LaunchTemplateEbsBlockDevice) SetVolumeType(v string) *LaunchTemplateEb } // The parameters for a block device for an EBS volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateEbsBlockDeviceRequest type LaunchTemplateEbsBlockDeviceRequest struct { _ struct{} `type:"structure"` @@ -50440,7 +52333,6 @@ func (s *LaunchTemplateEbsBlockDeviceRequest) SetVolumeType(v string) *LaunchTem } // Describes an IAM instance profile. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateIamInstanceProfileSpecification type LaunchTemplateIamInstanceProfileSpecification struct { _ struct{} `type:"structure"` @@ -50474,7 +52366,6 @@ func (s *LaunchTemplateIamInstanceProfileSpecification) SetName(v string) *Launc } // An IAM instance profile. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateIamInstanceProfileSpecificationRequest type LaunchTemplateIamInstanceProfileSpecificationRequest struct { _ struct{} `type:"structure"` @@ -50508,7 +52399,6 @@ func (s *LaunchTemplateIamInstanceProfileSpecificationRequest) SetName(v string) } // The market (purchasing) option for the instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateInstanceMarketOptions type LaunchTemplateInstanceMarketOptions struct { _ struct{} `type:"structure"` @@ -50542,7 +52432,6 @@ func (s *LaunchTemplateInstanceMarketOptions) SetSpotOptions(v *LaunchTemplateSp } // The market (purchasing) option for the instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateInstanceMarketOptionsRequest type LaunchTemplateInstanceMarketOptionsRequest struct { _ struct{} `type:"structure"` @@ -50576,7 +52465,6 @@ func (s *LaunchTemplateInstanceMarketOptionsRequest) SetSpotOptions(v *LaunchTem } // Describes a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateInstanceNetworkInterfaceSpecification type LaunchTemplateInstanceNetworkInterfaceSpecification struct { _ struct{} `type:"structure"` @@ -50701,7 +52589,6 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetSubnetId(v stri } // The parameters for a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateInstanceNetworkInterfaceSpecificationRequest type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { _ struct{} `type:"structure"` @@ -50755,26 +52642,6 @@ func (s LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) GoString() s return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LaunchTemplateInstanceNetworkInterfaceSpecificationRequest"} - if s.PrivateIpAddresses != nil { - for i, v := range s.PrivateIpAddresses { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { s.AssociatePublicIpAddress = &v @@ -50848,7 +52715,6 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetSubnetId } // Describes overrides for a launch template. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateOverrides type LaunchTemplateOverrides struct { _ struct{} `type:"structure"` @@ -50858,6 +52724,14 @@ type LaunchTemplateOverrides struct { // The instance type. 
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + // The priority for the launch template override. If OnDemandAllocationStrategy + // is set to prioritized, Spot Fleet uses priority to determine which launch + // template override to use first in fulfilling On-Demand capacity. The highest + // priority is launched first. Valid values are whole numbers starting at 0. + // The lower the number, the higher the priority. If no number is set, the launch + // template override has the lowest priority. + Priority *float64 `locationName:"priority" type:"double"` + // The maximum price per unit hour that you are willing to pay for a Spot Instance. SpotPrice *string `locationName:"spotPrice" type:"string"` @@ -50890,6 +52764,12 @@ func (s *LaunchTemplateOverrides) SetInstanceType(v string) *LaunchTemplateOverr return s } +// SetPriority sets the Priority field's value. +func (s *LaunchTemplateOverrides) SetPriority(v float64) *LaunchTemplateOverrides { + s.Priority = &v + return s +} + // SetSpotPrice sets the SpotPrice field's value. func (s *LaunchTemplateOverrides) SetSpotPrice(v string) *LaunchTemplateOverrides { s.SpotPrice = &v @@ -50909,7 +52789,6 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v float64) *LaunchTemplate } // Describes the placement of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplatePlacement type LaunchTemplatePlacement struct { _ struct{} `type:"structure"` @@ -50980,7 +52859,6 @@ func (s *LaunchTemplatePlacement) SetTenancy(v string) *LaunchTemplatePlacement } // The placement for the instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplatePlacementRequest type LaunchTemplatePlacementRequest struct { _ struct{} `type:"structure"` @@ -51051,8 +52929,7 @@ func (s *LaunchTemplatePlacementRequest) SetTenancy(v string) *LaunchTemplatePla } // The launch template to use. You must specify either the launch template ID -// or launch template name in the request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateSpecification +// or launch template name in the request, but not both. type LaunchTemplateSpecification struct { _ struct{} `type:"structure"` @@ -51097,7 +52974,6 @@ func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecif } // The options for Spot Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateSpotMarketOptions type LaunchTemplateSpotMarketOptions struct { _ struct{} `type:"structure"` @@ -51119,7 +52995,7 @@ type LaunchTemplateSpotMarketOptions struct { // active until all instances launch, the request is canceled, or this date // is reached. If the request is persistent, it remains active until it is canceled // or this date and time is reached. - ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } // String returns the string representation @@ -51163,7 +53039,6 @@ func (s *LaunchTemplateSpotMarketOptions) SetValidUntil(v time.Time) *LaunchTemp } // The options for Spot Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateSpotMarketOptionsRequest type LaunchTemplateSpotMarketOptionsRequest struct { _ struct{} `type:"structure"` @@ -51186,7 +53061,7 @@ type LaunchTemplateSpotMarketOptionsRequest struct { // is reached. 
If the request is persistent, it remains active until it is canceled // or this date and time is reached. The default end date is 7 days from the // current date. - ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` + ValidUntil *time.Time `type:"timestamp"` } // String returns the string representation @@ -51230,7 +53105,6 @@ func (s *LaunchTemplateSpotMarketOptionsRequest) SetValidUntil(v time.Time) *Lau } // The tag specification for the launch template. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateTagSpecification type LaunchTemplateTagSpecification struct { _ struct{} `type:"structure"` @@ -51264,12 +53138,12 @@ func (s *LaunchTemplateTagSpecification) SetTags(v []*Tag) *LaunchTemplateTagSpe } // The tags specification for the launch template. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateTagSpecificationRequest type LaunchTemplateTagSpecificationRequest struct { _ struct{} `type:"structure"` // The type of resource to tag. Currently, the resource types that support tagging - // on creation are instance and volume. + // on creation are instance and volume. To tag a resource after it has been + // created, see CreateTags. ResourceType *string `type:"string" enum:"ResourceType"` // The tags to apply to the resource. @@ -51299,12 +53173,11 @@ func (s *LaunchTemplateTagSpecificationRequest) SetTags(v []*Tag) *LaunchTemplat } // Describes a launch template version. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplateVersion type LaunchTemplateVersion struct { _ struct{} `type:"structure"` // The time the version was created. - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` // The principal that created the version. CreatedBy *string `locationName:"createdBy" type:"string"` @@ -51387,7 +53260,6 @@ func (s *LaunchTemplateVersion) SetVersionNumber(v int64) *LaunchTemplateVersion } // Describes the monitoring for the instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplatesMonitoring type LaunchTemplatesMonitoring struct { _ struct{} `type:"structure"` @@ -51413,7 +53285,6 @@ func (s *LaunchTemplatesMonitoring) SetEnabled(v bool) *LaunchTemplatesMonitorin } // Describes the monitoring for the instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LaunchTemplatesMonitoringRequest type LaunchTemplatesMonitoringRequest struct { _ struct{} `type:"structure"` @@ -51440,7 +53311,6 @@ func (s *LaunchTemplatesMonitoringRequest) SetEnabled(v bool) *LaunchTemplatesMo // Describes the Classic Load Balancers and target groups to attach to a Spot // Fleet request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadBalancersConfig type LoadBalancersConfig struct { _ struct{} `type:"structure"` @@ -51494,7 +53364,6 @@ func (s *LoadBalancersConfig) SetTargetGroupsConfig(v *TargetGroupsConfig) *Load } // Describes a load permission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermission type LoadPermission struct { _ struct{} `type:"structure"` @@ -51528,7 +53397,6 @@ func (s *LoadPermission) SetUserId(v string) *LoadPermission { } // Describes modifications to the load permissions of an Amazon FPGA image (AFI). 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionModifications type LoadPermissionModifications struct { _ struct{} `type:"structure"` @@ -51562,7 +53430,6 @@ func (s *LoadPermissionModifications) SetRemove(v []*LoadPermissionRequest) *Loa } // Describes a load permission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionRequest type LoadPermissionRequest struct { _ struct{} `type:"structure"` @@ -51595,7 +53462,109 @@ func (s *LoadPermissionRequest) SetUserId(v string) *LoadPermissionRequest { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeRequest +type ModifyFleetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Indicates whether running instances should be terminated if the total target + // capacity of the EC2 Fleet is decreased below the current size of the EC2 + // Fleet. + ExcessCapacityTerminationPolicy *string `type:"string" enum:"FleetExcessCapacityTerminationPolicy"` + + // The ID of the EC2 Fleet. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // The size of the EC2 Fleet. + // + // TargetCapacitySpecification is a required field + TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ModifyFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyFleetInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.TargetCapacitySpecification == nil { + invalidParams.Add(request.NewErrParamRequired("TargetCapacitySpecification")) + } + if s.TargetCapacitySpecification != nil { + if err := s.TargetCapacitySpecification.Validate(); err != nil { + invalidParams.AddNested("TargetCapacitySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyFleetInput) SetDryRun(v bool) *ModifyFleetInput { + s.DryRun = &v + return s +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *ModifyFleetInput) SetExcessCapacityTerminationPolicy(v string) *ModifyFleetInput { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *ModifyFleetInput) SetFleetId(v string) *ModifyFleetInput { + s.FleetId = &v + return s +} + +// SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. +func (s *ModifyFleetInput) SetTargetCapacitySpecification(v *TargetCapacitySpecificationRequest) *ModifyFleetInput { + s.TargetCapacitySpecification = v + return s +} + +type ModifyFleetOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFleetOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyFleetOutput) SetReturn(v bool) *ModifyFleetOutput { + s.Return = &v + return s +} + type ModifyFpgaImageAttributeInput struct { _ struct{} `type:"structure"` @@ -51722,7 +53691,6 @@ func (s *ModifyFpgaImageAttributeInput) SetUserIds(v []*string) *ModifyFpgaImage return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeResult type ModifyFpgaImageAttributeOutput struct { _ struct{} `type:"structure"` @@ -51747,7 +53715,6 @@ func (s *ModifyFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttri } // Contains the parameters for ModifyHosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHostsRequest type ModifyHostsInput struct { _ struct{} `type:"structure"` @@ -51756,7 +53723,7 @@ type ModifyHostsInput struct { // AutoPlacement is a required field AutoPlacement *string `locationName:"autoPlacement" type:"string" required:"true" enum:"AutoPlacement"` - // The host IDs of the Dedicated Hosts you want to modify. + // The IDs of the Dedicated Hosts to modify. // // HostIds is a required field HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` @@ -51801,7 +53768,6 @@ func (s *ModifyHostsInput) SetHostIds(v []*string) *ModifyHostsInput { } // Contains the output of ModifyHosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHostsResult type ModifyHostsOutput struct { _ struct{} `type:"structure"` @@ -51836,11 +53802,19 @@ func (s *ModifyHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ModifyHostsO } // Contains the parameters of ModifyIdFormat. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdFormatRequest type ModifyIdFormatInput struct { _ struct{} `type:"structure"` - // The type of resource: instance | reservation | snapshot | volume + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | route-table + // | route-table-association | security-group | subnet | subnet-cidr-block-association + // | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection + // | vpn-connection | vpn-gateway. + // + // Alternatively, use the all-current option to include all resource types that + // are currently within their opt-in period for longer IDs. // // Resource is a required field Resource *string `type:"string" required:"true"` @@ -51889,7 +53863,6 @@ func (s *ModifyIdFormatInput) SetUseLongIds(v bool) *ModifyIdFormatInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdFormatOutput type ModifyIdFormatOutput struct { _ struct{} `type:"structure"` } @@ -51905,7 +53878,6 @@ func (s ModifyIdFormatOutput) GoString() string { } // Contains the parameters of ModifyIdentityIdFormat. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdentityIdFormatRequest type ModifyIdentityIdFormatInput struct { _ struct{} `type:"structure"` @@ -51916,7 +53888,16 @@ type ModifyIdentityIdFormatInput struct { // PrincipalArn is a required field PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` - // The type of resource: instance | reservation | snapshot | volume + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | route-table + // | route-table-association | security-group | subnet | subnet-cidr-block-association + // | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection + // | vpn-connection | vpn-gateway. + // + // Alternatively, use the all-current option to include all resource types that + // are currently within their opt-in period for longer IDs. // // Resource is a required field Resource *string `locationName:"resource" type:"string" required:"true"` @@ -51974,7 +53955,6 @@ func (s *ModifyIdentityIdFormatInput) SetUseLongIds(v bool) *ModifyIdentityIdFor return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdentityIdFormatOutput type ModifyIdentityIdFormatOutput struct { _ struct{} `type:"structure"` } @@ -51990,7 +53970,6 @@ func (s ModifyIdentityIdFormatOutput) GoString() string { } // Contains the parameters for ModifyImageAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyImageAttributeRequest type ModifyImageAttributeInput struct { _ struct{} `type:"structure"` @@ -52119,7 +54098,6 @@ func (s *ModifyImageAttributeInput) SetValue(v string) *ModifyImageAttributeInpu return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyImageAttributeOutput type ModifyImageAttributeOutput struct { _ struct{} `type:"structure"` } @@ -52135,7 +54113,6 @@ func (s ModifyImageAttributeOutput) GoString() string { } // Contains the parameters for ModifyInstanceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceAttributeRequest type ModifyInstanceAttributeInput struct { _ struct{} `type:"structure"` @@ -52351,7 +54328,6 @@ func (s *ModifyInstanceAttributeInput) SetValue(v string) *ModifyInstanceAttribu return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceAttributeOutput type ModifyInstanceAttributeOutput struct { _ struct{} `type:"structure"` } @@ -52366,7 +54342,6 @@ func (s ModifyInstanceAttributeOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceCreditSpecificationRequest type ModifyInstanceCreditSpecificationInput struct { _ struct{} `type:"structure"` @@ -52428,7 +54403,6 @@ func (s *ModifyInstanceCreditSpecificationInput) SetInstanceCreditSpecifications return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceCreditSpecificationResult type ModifyInstanceCreditSpecificationOutput struct { _ struct{} `type:"structure"` @@ -52464,14 +54438,20 @@ func (s *ModifyInstanceCreditSpecificationOutput) SetUnsuccessfulInstanceCreditS } // Contains the parameters for ModifyInstancePlacement. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstancePlacementRequest type ModifyInstancePlacementInput struct { _ struct{} `type:"structure"` - // The new affinity setting for the instance. + // The affinity setting for the instance. Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"` - // The ID of the Dedicated Host that the instance will have affinity with. + // The name of the placement group in which to place the instance. For spread + // placement groups, the instance must have a tenancy of default. For cluster + // placement groups, the instance must have a tenancy of default or dedicated. + // + // To remove an instance from a placement group, specify an empty string (""). + GroupName *string `type:"string"` + + // The ID of the Dedicated Host with which to associate the instance. HostId *string `locationName:"hostId" type:"string"` // The ID of the instance that you are modifying. @@ -52479,7 +54459,7 @@ type ModifyInstancePlacementInput struct { // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` - // The tenancy of the instance that you are modifying. + // The tenancy for the instance. Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"` } @@ -52512,6 +54492,12 @@ func (s *ModifyInstancePlacementInput) SetAffinity(v string) *ModifyInstancePlac return s } +// SetGroupName sets the GroupName field's value. +func (s *ModifyInstancePlacementInput) SetGroupName(v string) *ModifyInstancePlacementInput { + s.GroupName = &v + return s +} + // SetHostId sets the HostId field's value. func (s *ModifyInstancePlacementInput) SetHostId(v string) *ModifyInstancePlacementInput { s.HostId = &v @@ -52531,7 +54517,6 @@ func (s *ModifyInstancePlacementInput) SetTenancy(v string) *ModifyInstancePlace } // Contains the output of ModifyInstancePlacement. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstancePlacementResult type ModifyInstancePlacementOutput struct { _ struct{} `type:"structure"` @@ -52555,7 +54540,6 @@ func (s *ModifyInstancePlacementOutput) SetReturn(v bool) *ModifyInstancePlaceme return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyLaunchTemplateRequest type ModifyLaunchTemplateInput struct { _ struct{} `type:"structure"` @@ -52634,7 +54618,6 @@ func (s *ModifyLaunchTemplateInput) SetLaunchTemplateName(v string) *ModifyLaunc return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyLaunchTemplateResult type ModifyLaunchTemplateOutput struct { _ struct{} `type:"structure"` @@ -52659,7 +54642,6 @@ func (s *ModifyLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Modif } // Contains the parameters for ModifyNetworkInterfaceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyNetworkInterfaceAttributeRequest type ModifyNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` @@ -52754,7 +54736,6 @@ func (s *ModifyNetworkInterfaceAttributeInput) SetSourceDestCheck(v *AttributeBo return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyNetworkInterfaceAttributeOutput type ModifyNetworkInterfaceAttributeOutput struct { _ struct{} `type:"structure"` } @@ -52770,7 +54751,6 @@ func (s ModifyNetworkInterfaceAttributeOutput) GoString() string { } // Contains the parameters for ModifyReservedInstances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyReservedInstancesRequest type ModifyReservedInstancesInput struct { _ struct{} `type:"structure"` @@ -52834,7 +54814,6 @@ func (s *ModifyReservedInstancesInput) SetTargetConfigurations(v []*ReservedInst } // Contains the output of ModifyReservedInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyReservedInstancesResult type ModifyReservedInstancesOutput struct { _ struct{} `type:"structure"` @@ -52859,13 +54838,11 @@ func (s *ModifyReservedInstancesOutput) SetReservedInstancesModificationId(v str } // Contains the parameters for ModifySnapshotAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySnapshotAttributeRequest type ModifySnapshotAttributeInput struct { _ struct{} `type:"structure"` - // The snapshot attribute to modify. - // - // Only volume creation permissions may be modified at the customer level. + // The snapshot attribute to modify. Only volume creation permissions can be + // modified. Attribute *string `type:"string" enum:"SnapshotAttributeName"` // A JSON representation of the snapshot attribute modification. @@ -52957,7 +54934,6 @@ func (s *ModifySnapshotAttributeInput) SetUserIds(v []*string) *ModifySnapshotAt return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySnapshotAttributeOutput type ModifySnapshotAttributeOutput struct { _ struct{} `type:"structure"` } @@ -52973,7 +54949,6 @@ func (s ModifySnapshotAttributeOutput) GoString() string { } // Contains the parameters for ModifySpotFleetRequest. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySpotFleetRequestRequest type ModifySpotFleetRequestInput struct { _ struct{} `type:"structure"` @@ -53033,7 +55008,6 @@ func (s *ModifySpotFleetRequestInput) SetTargetCapacity(v int64) *ModifySpotFlee } // Contains the output of ModifySpotFleetRequest. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySpotFleetRequestResponse type ModifySpotFleetRequestOutput struct { _ struct{} `type:"structure"` @@ -53058,7 +55032,6 @@ func (s *ModifySpotFleetRequestOutput) SetReturn(v bool) *ModifySpotFleetRequest } // Contains the parameters for ModifySubnetAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySubnetAttributeRequest type ModifySubnetAttributeInput struct { _ struct{} `type:"structure"` @@ -53125,7 +55098,6 @@ func (s *ModifySubnetAttributeInput) SetSubnetId(v string) *ModifySubnetAttribut return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySubnetAttributeOutput type ModifySubnetAttributeOutput struct { _ struct{} `type:"structure"` } @@ -53141,7 +55113,6 @@ func (s ModifySubnetAttributeOutput) GoString() string { } // Contains the parameters for ModifyVolumeAttribute. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeAttributeRequest type ModifyVolumeAttributeInput struct { _ struct{} `type:"structure"` @@ -53201,7 +55172,6 @@ func (s *ModifyVolumeAttributeInput) SetVolumeId(v string) *ModifyVolumeAttribut return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeAttributeOutput type ModifyVolumeAttributeOutput struct { _ struct{} `type:"structure"` } @@ -53216,7 +55186,6 @@ func (s ModifyVolumeAttributeOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeRequest type ModifyVolumeInput struct { _ struct{} `type:"structure"` @@ -53226,30 +55195,27 @@ type ModifyVolumeInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // Target IOPS rate of the volume to be modified. + // The target IOPS rate of the volume. // - // Only valid for Provisioned IOPS SSD (io1) volumes. For more information about - // io1 IOPS configuration, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops - // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). + // This is only valid for Provisioned IOPS SSD (io1) volumes. For more information, + // see Provisioned IOPS SSD (io1) Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). // // Default: If no IOPS value is specified, the existing value is retained. Iops *int64 `type:"integer"` - // Target size in GiB of the volume to be modified. Target volume size must - // be greater than or equal to than the existing size of the volume. For information - // about available EBS volume sizes, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html - // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). + // The target size of the volume, in GiB. The target volume size must be greater + // than or equal to than the existing size of the volume. For information about + // available EBS volume sizes, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). // // Default: If no size is specified, the existing size is retained. Size *int64 `type:"integer"` + // The ID of the volume. + // // VolumeId is a required field VolumeId *string `type:"string" required:"true"` - // Target EBS volume type of the volume to be modified - // - // The API does not support modifications for volume type standard. You also - // cannot change the type of a volume to standard. + // The target EBS volume type of the volume. // // Default: If no type is specified, the existing type is retained. VolumeType *string `type:"string" enum:"VolumeType"` @@ -53308,11 +55274,10 @@ func (s *ModifyVolumeInput) SetVolumeType(v string) *ModifyVolumeInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeResult type ModifyVolumeOutput struct { _ struct{} `type:"structure"` - // A VolumeModification object. + // Information about the volume modification. VolumeModification *VolumeModification `locationName:"volumeModification" type:"structure"` } @@ -53333,7 +55298,6 @@ func (s *ModifyVolumeOutput) SetVolumeModification(v *VolumeModification) *Modif } // Contains the parameters for ModifyVpcAttribute. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcAttributeRequest type ModifyVpcAttributeInput struct { _ struct{} `type:"structure"` @@ -53348,8 +55312,8 @@ type ModifyVpcAttributeInput struct { // Indicates whether the DNS resolution is supported for the VPC. If enabled, // queries to the Amazon provided DNS server at the 169.254.169.253 IP address, // or the reserved IP address at the base of the VPC network range "plus two" - // will succeed. If disabled, the Amazon provided DNS service in the VPC that - // resolves public DNS hostnames to IP addresses is not enabled. + // succeed. If disabled, the Amazon provided DNS service in the VPC that resolves + // public DNS hostnames to IP addresses is not enabled. // // You cannot modify the DNS resolution and DNS hostnames attributes in the // same request. Use separate requests for each attribute. @@ -53402,7 +55366,6 @@ func (s *ModifyVpcAttributeInput) SetVpcId(v string) *ModifyVpcAttributeInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcAttributeOutput type ModifyVpcAttributeOutput struct { _ struct{} `type:"structure"` } @@ -53417,7 +55380,6 @@ func (s ModifyVpcAttributeOutput) GoString() string { return s.String() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointConnectionNotificationRequest type ModifyVpcEndpointConnectionNotificationInput struct { _ struct{} `type:"structure"` @@ -53487,7 +55449,6 @@ func (s *ModifyVpcEndpointConnectionNotificationInput) SetDryRun(v bool) *Modify return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointConnectionNotificationResult type ModifyVpcEndpointConnectionNotificationOutput struct { _ struct{} `type:"structure"` @@ -53512,7 +55473,6 @@ func (s *ModifyVpcEndpointConnectionNotificationOutput) SetReturnValue(v bool) * } // Contains the parameters for ModifyVpcEndpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointRequest type ModifyVpcEndpointInput struct { _ struct{} `type:"structure"` @@ -53649,7 +55609,6 @@ func (s *ModifyVpcEndpointInput) SetVpcEndpointId(v string) *ModifyVpcEndpointIn return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointResult type ModifyVpcEndpointOutput struct { _ struct{} `type:"structure"` @@ -53673,7 +55632,6 @@ func (s *ModifyVpcEndpointOutput) SetReturn(v bool) *ModifyVpcEndpointOutput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServiceConfigurationRequest type ModifyVpcEndpointServiceConfigurationInput struct { _ struct{} `type:"structure"` @@ -53682,7 +55640,7 @@ type ModifyVpcEndpointServiceConfigurationInput struct { // The Amazon Resource Names (ARNs) of Network Load Balancers to add to your // service configuration. - AddNetworkLoadBalancerArns []*string `locationName:"addNetworkLoadBalancerArn" locationNameList:"item" type:"list"` + AddNetworkLoadBalancerArns []*string `locationName:"AddNetworkLoadBalancerArn" locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have @@ -53692,7 +55650,7 @@ type ModifyVpcEndpointServiceConfigurationInput struct { // The Amazon Resource Names (ARNs) of Network Load Balancers to remove from // your service configuration. 
- RemoveNetworkLoadBalancerArns []*string `locationName:"removeNetworkLoadBalancerArn" locationNameList:"item" type:"list"` + RemoveNetworkLoadBalancerArns []*string `locationName:"RemoveNetworkLoadBalancerArn" locationNameList:"item" type:"list"` // The ID of the service. // @@ -53753,7 +55711,6 @@ func (s *ModifyVpcEndpointServiceConfigurationInput) SetServiceId(v string) *Mod return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServiceConfigurationResult type ModifyVpcEndpointServiceConfigurationOutput struct { _ struct{} `type:"structure"` @@ -53777,12 +55734,12 @@ func (s *ModifyVpcEndpointServiceConfigurationOutput) SetReturn(v bool) *ModifyV return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServicePermissionsRequest type ModifyVpcEndpointServicePermissionsInput struct { _ struct{} `type:"structure"` - // One or more Amazon Resource Names (ARNs) of principals for which to allow - // permission. Specify * to allow all principals. + // The Amazon Resource Names (ARN) of one or more principals. Permissions are + // granted to the principals in this list. To grant permissions to all principals, + // specify an asterisk (*). AddAllowedPrincipals []*string `locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without @@ -53791,8 +55748,8 @@ type ModifyVpcEndpointServicePermissionsInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // One or more Amazon Resource Names (ARNs) of principals for which to remove - // permission. + // The Amazon Resource Names (ARN) of one or more principals. Permissions are + // revoked for principals in this list. RemoveAllowedPrincipals []*string `locationNameList:"item" type:"list"` // The ID of the service. @@ -53848,7 +55805,6 @@ func (s *ModifyVpcEndpointServicePermissionsInput) SetServiceId(v string) *Modif return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServicePermissionsResult type ModifyVpcEndpointServicePermissionsOutput struct { _ struct{} `type:"structure"` @@ -53872,7 +55828,6 @@ func (s *ModifyVpcEndpointServicePermissionsOutput) SetReturnValue(v bool) *Modi return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcPeeringConnectionOptionsRequest type ModifyVpcPeeringConnectionOptionsInput struct { _ struct{} `type:"structure"` @@ -53941,7 +55896,6 @@ func (s *ModifyVpcPeeringConnectionOptionsInput) SetVpcPeeringConnectionId(v str return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcPeeringConnectionOptionsResult type ModifyVpcPeeringConnectionOptionsOutput struct { _ struct{} `type:"structure"` @@ -53975,7 +55929,6 @@ func (s *ModifyVpcPeeringConnectionOptionsOutput) SetRequesterPeeringConnectionO } // Contains the parameters for ModifyVpcTenancy. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyRequest type ModifyVpcTenancyInput struct { _ struct{} `type:"structure"` @@ -54041,7 +55994,6 @@ func (s *ModifyVpcTenancyInput) SetVpcId(v string) *ModifyVpcTenancyInput { } // Contains the output of ModifyVpcTenancy. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyResult type ModifyVpcTenancyOutput struct { _ struct{} `type:"structure"` @@ -54066,7 +56018,6 @@ func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput } // Contains the parameters for MonitorInstances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstancesRequest type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -54118,7 +56069,6 @@ func (s *MonitorInstancesInput) SetInstanceIds(v []*string) *MonitorInstancesInp } // Contains the output of MonitorInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstancesResult type MonitorInstancesOutput struct { _ struct{} `type:"structure"` @@ -54143,7 +56093,6 @@ func (s *MonitorInstancesOutput) SetInstanceMonitorings(v []*InstanceMonitoring) } // Describes the monitoring of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Monitoring type Monitoring struct { _ struct{} `type:"structure"` @@ -54169,7 +56118,6 @@ func (s *Monitoring) SetState(v string) *Monitoring { } // Contains the parameters for MoveAddressToVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MoveAddressToVpcRequest type MoveAddressToVpcInput struct { _ struct{} `type:"structure"` @@ -54221,7 +56169,6 @@ func (s *MoveAddressToVpcInput) SetPublicIp(v string) *MoveAddressToVpcInput { } // Contains the output of MoveAddressToVpc. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MoveAddressToVpcResult type MoveAddressToVpcOutput struct { _ struct{} `type:"structure"` @@ -54255,7 +56202,6 @@ func (s *MoveAddressToVpcOutput) SetStatus(v string) *MoveAddressToVpcOutput { } // Describes the status of a moving Elastic IP address. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MovingAddressStatus type MovingAddressStatus struct { _ struct{} `type:"structure"` @@ -54290,15 +56236,14 @@ func (s *MovingAddressStatus) SetPublicIp(v string) *MovingAddressStatus { } // Describes a NAT gateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NatGateway type NatGateway struct { _ struct{} `type:"structure"` // The date and time the NAT gateway was created. - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` // The date and time the NAT gateway was deleted, if applicable. - DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp" timestampFormat:"iso8601"` + DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp"` // If the NAT gateway could not be created, specifies the error code for the // failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound @@ -54445,7 +56390,6 @@ func (s *NatGateway) SetVpcId(v string) *NatGateway { } // Describes the IP addresses and network interface associated with a NAT gateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NatGatewayAddress type NatGatewayAddress struct { _ struct{} `type:"structure"` @@ -54498,7 +56442,6 @@ func (s *NatGatewayAddress) SetPublicIp(v string) *NatGatewayAddress { } // Describes a network ACL. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkAcl type NetworkAcl struct { _ struct{} `type:"structure"` @@ -54568,7 +56511,6 @@ func (s *NetworkAcl) SetVpcId(v string) *NetworkAcl { } // Describes an association between a network ACL and a subnet. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkAclAssociation type NetworkAclAssociation struct { _ struct{} `type:"structure"` @@ -54611,7 +56553,6 @@ func (s *NetworkAclAssociation) SetSubnetId(v string) *NetworkAclAssociation { } // Describes an entry in a network ACL. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkAclEntry type NetworkAclEntry struct { _ struct{} `type:"structure"` @@ -54701,7 +56642,6 @@ func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry { } // Describes a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterface type NetworkInterface struct { _ struct{} `type:"structure"` @@ -54899,7 +56839,6 @@ func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { } // Describes association information for an Elastic IP address (IPv4 only). -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfaceAssociation type NetworkInterfaceAssociation struct { _ struct{} `type:"structure"` @@ -54960,12 +56899,11 @@ func (s *NetworkInterfaceAssociation) SetPublicIp(v string) *NetworkInterfaceAss } // Describes a network interface attachment. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfaceAttachment type NetworkInterfaceAttachment struct { _ struct{} `type:"structure"` // The timestamp indicating when the attachment initiated. - AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` // The ID of the network interface attachment. AttachmentId *string `locationName:"attachmentId" type:"string"` @@ -55039,7 +56977,6 @@ func (s *NetworkInterfaceAttachment) SetStatus(v string) *NetworkInterfaceAttach } // Describes an attachment change. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfaceAttachmentChanges type NetworkInterfaceAttachmentChanges struct { _ struct{} `type:"structure"` @@ -55073,7 +57010,6 @@ func (s *NetworkInterfaceAttachmentChanges) SetDeleteOnTermination(v bool) *Netw } // Describes an IPv6 address associated with a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfaceIpv6Address type NetworkInterfaceIpv6Address struct { _ struct{} `type:"structure"` @@ -55098,7 +57034,6 @@ func (s *NetworkInterfaceIpv6Address) SetIpv6Address(v string) *NetworkInterface } // Describes a permission for a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePermission type NetworkInterfacePermission struct { _ struct{} `type:"structure"` @@ -55168,7 +57103,6 @@ func (s *NetworkInterfacePermission) SetPermissionState(v *NetworkInterfacePermi } // Describes the state of a network interface permission. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePermissionState type NetworkInterfacePermissionState struct { _ struct{} `type:"structure"` @@ -55202,7 +57136,6 @@ func (s *NetworkInterfacePermissionState) SetStatusMessage(v string) *NetworkInt } // Describes the private IPv4 address of a network interface. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePrivateIpAddress type NetworkInterfacePrivateIpAddress struct { _ struct{} `type:"structure"` @@ -55255,7 +57188,6 @@ func (s *NetworkInterfacePrivateIpAddress) SetPrivateIpAddress(v string) *Networ return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NewDhcpConfiguration type NewDhcpConfiguration struct { _ struct{} `type:"structure"` @@ -55286,9 +57218,66 @@ func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration { return s } +// The allocation strategy of On-Demand Instances in an EC2 Fleet. +type OnDemandOptions struct { + _ struct{} `type:"structure"` + + // The order of the launch template overrides to use in fulfilling On-Demand + // capacity. If you specify lowest-price, EC2 Fleet uses price to determine + // the order, launching the lowest price first. If you specify prioritized, + // EC2 Fleet uses the priority that you assigned to each launch template override, + // launching the highest priority first. If you do not specify a value, EC2 + // Fleet defaults to lowest-price. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"FleetOnDemandAllocationStrategy"` +} + +// String returns the string representation +func (s OnDemandOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnDemandOptions) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *OnDemandOptions) SetAllocationStrategy(v string) *OnDemandOptions { + s.AllocationStrategy = &v + return s +} + +// The allocation strategy of On-Demand Instances in an EC2 Fleet. +type OnDemandOptionsRequest struct { + _ struct{} `type:"structure"` + + // The order of the launch template overrides to use in fulfilling On-Demand + // capacity. If you specify lowest-price, EC2 Fleet uses price to determine + // the order, launching the lowest price first. If you specify prioritized, + // EC2 Fleet uses the priority that you assigned to each launch template override, + // launching the highest priority first. If you do not specify a value, EC2 + // Fleet defaults to lowest-price. + AllocationStrategy *string `type:"string" enum:"FleetOnDemandAllocationStrategy"` +} + +// String returns the string representation +func (s OnDemandOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnDemandOptionsRequest) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *OnDemandOptionsRequest) SetAllocationStrategy(v string) *OnDemandOptionsRequest { + s.AllocationStrategy = &v + return s +} + // Describes the data that identifies an Amazon FPGA image (AFI) on the PCI // bus. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PciId type PciId struct { _ struct{} `type:"structure"` @@ -55340,12 +57329,11 @@ func (s *PciId) SetVendorId(v string) *PciId { } // Describes the VPC peering connection options. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PeeringConnectionOptions type PeeringConnectionOptions struct { _ struct{} `type:"structure"` - // If true, enables a local VPC to resolve public DNS hostnames to private IP - // addresses when queried from instances in the peer VPC. 
+ // If true, the public DNS hostnames of instances in the specified VPC resolve + // to private IP addresses when queried from instances in the peer VPC. AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"` // If true, enables outbound communication from an EC2-Classic instance that's @@ -55386,7 +57374,6 @@ func (s *PeeringConnectionOptions) SetAllowEgressFromLocalVpcToRemoteClassicLink } // The VPC peering connection options. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PeeringConnectionOptionsRequest type PeeringConnectionOptionsRequest struct { _ struct{} `type:"structure"` @@ -55432,7 +57419,6 @@ func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalVpcToRemoteClas } // Describes the placement of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Placement type Placement struct { _ struct{} `type:"structure"` @@ -55443,7 +57429,7 @@ type Placement struct { // The Availability Zone of the instance. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The name of the placement group the instance is in (for cluster compute instances). + // The name of the placement group the instance is in. GroupName *string `locationName:"groupName" type:"string"` // The ID of the Dedicated Host on which the instance resides. This parameter @@ -55506,7 +57492,6 @@ func (s *Placement) SetTenancy(v string) *Placement { } // Describes a placement group. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PlacementGroup type PlacementGroup struct { _ struct{} `type:"structure"` @@ -55549,7 +57534,6 @@ func (s *PlacementGroup) SetStrategy(v string) *PlacementGroup { } // Describes a range of ports. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PortRange type PortRange struct { _ struct{} `type:"structure"` @@ -55583,7 +57567,6 @@ func (s *PortRange) SetTo(v int64) *PortRange { } // Describes prefixes for AWS services. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PrefixList type PrefixList struct { _ struct{} `type:"structure"` @@ -55626,7 +57609,6 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { } // [EC2-VPC only] The ID of the prefix. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PrefixListId type PrefixListId struct { _ struct{} `type:"structure"` @@ -55664,7 +57646,6 @@ func (s *PrefixListId) SetPrefixListId(v string) *PrefixListId { } // Describes the price for a Reserved Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PriceSchedule type PriceSchedule struct { _ struct{} `type:"structure"` @@ -55727,7 +57708,6 @@ func (s *PriceSchedule) SetTerm(v int64) *PriceSchedule { } // Describes the price for a Reserved Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PriceScheduleSpecification type PriceScheduleSpecification struct { _ struct{} `type:"structure"` @@ -55772,7 +57752,6 @@ func (s *PriceScheduleSpecification) SetTerm(v int64) *PriceScheduleSpecificatio } // Describes a Reserved Instance offering. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PricingDetail type PricingDetail struct { _ struct{} `type:"structure"` @@ -55805,8 +57784,40 @@ func (s *PricingDetail) SetPrice(v float64) *PricingDetail { return s } +// PrincipalIdFormat description +type PrincipalIdFormat struct { + _ struct{} `type:"structure"` + + // PrincipalIdFormatARN description + Arn *string `locationName:"arn" type:"string"` + + // PrincipalIdFormatStatuses description + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PrincipalIdFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrincipalIdFormat) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *PrincipalIdFormat) SetArn(v string) *PrincipalIdFormat { + s.Arn = &v + return s +} + +// SetStatuses sets the Statuses field's value. +func (s *PrincipalIdFormat) SetStatuses(v []*IdFormat) *PrincipalIdFormat { + s.Statuses = v + return s +} + // Describes a secondary private IPv4 address for a network interface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PrivateIpAddressSpecification type PrivateIpAddressSpecification struct { _ struct{} `type:"structure"` @@ -55815,9 +57826,7 @@ type PrivateIpAddressSpecification struct { Primary *bool `locationName:"primary" type:"boolean"` // The private IPv4 addresses. - // - // PrivateIpAddress is a required field - PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"` + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` } // String returns the string representation @@ -55830,19 +57839,6 @@ func (s PrivateIpAddressSpecification) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PrivateIpAddressSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PrivateIpAddressSpecification"} - if s.PrivateIpAddress == nil { - invalidParams.Add(request.NewErrParamRequired("PrivateIpAddress")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetPrimary sets the Primary field's value. func (s *PrivateIpAddressSpecification) SetPrimary(v bool) *PrivateIpAddressSpecification { s.Primary = &v @@ -55856,7 +57852,6 @@ func (s *PrivateIpAddressSpecification) SetPrivateIpAddress(v string) *PrivateIp } // Describes a product code. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ProductCode type ProductCode struct { _ struct{} `type:"structure"` @@ -55890,11 +57885,10 @@ func (s *ProductCode) SetProductCodeType(v string) *ProductCode { } // Describes a virtual private gateway propagating route. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PropagatingVgw type PropagatingVgw struct { _ struct{} `type:"structure"` - // The ID of the virtual private gateway (VGW). + // The ID of the virtual private gateway. GatewayId *string `locationName:"gatewayId" type:"string"` } @@ -55917,14 +57911,13 @@ func (s *PropagatingVgw) SetGatewayId(v string) *PropagatingVgw { // Reserved. If you need to sustain traffic greater than the documented limits // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ProvisionedBandwidth type ProvisionedBandwidth struct { _ struct{} `type:"structure"` // Reserved. If you need to sustain traffic greater than the documented limits // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). - ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp" timestampFormat:"iso8601"` + ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp"` // Reserved. If you need to sustain traffic greater than the documented limits // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), @@ -55934,7 +57927,7 @@ type ProvisionedBandwidth struct { // Reserved. If you need to sustain traffic greater than the documented limits // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), // contact us through the Support Center (https://console.aws.amazon.com/support/home?). - RequestTime *time.Time `locationName:"requestTime" type:"timestamp" timestampFormat:"iso8601"` + RequestTime *time.Time `locationName:"requestTime" type:"timestamp"` // Reserved. If you need to sustain traffic greater than the documented limits // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), @@ -55988,7 +57981,6 @@ func (s *ProvisionedBandwidth) SetStatus(v string) *ProvisionedBandwidth { } // Describes the result of the purchase. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Purchase type Purchase struct { _ struct{} `type:"structure"` @@ -56077,7 +58069,6 @@ func (s *Purchase) SetUpfrontPrice(v string) *Purchase { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseHostReservationRequest type PurchaseHostReservationInput struct { _ struct{} `type:"structure"` @@ -56090,8 +58081,7 @@ type PurchaseHostReservationInput struct { // amounts are specified. At this time, the only supported currency is USD. CurrencyCode *string `type:"string" enum:"CurrencyCodeValues"` - // The ID/s of the Dedicated Host/s that the reservation will be associated - // with. + // The IDs of the Dedicated Hosts with which the reservation will be associated. // // HostIdSet is a required field HostIdSet []*string `locationNameList:"item" type:"list" required:"true"` @@ -56099,10 +58089,9 @@ type PurchaseHostReservationInput struct { // The specified limit is checked against the total upfront cost of the reservation // (calculated as the offering's upfront cost multiplied by the host count). // If the total upfront cost is greater than the specified price limit, the - // request will fail. This is used to ensure that the purchase does not exceed - // the expected upfront cost of the purchase. At this time, the only supported - // currency is USD. For example, to indicate a limit price of USD 100, specify - // 100.00. + // request fails. This is used to ensure that the purchase does not exceed the + // expected upfront cost of the purchase. At this time, the only supported currency + // is USD. For example, to indicate a limit price of USD 100, specify 100.00. LimitPrice *string `type:"string"` // The ID of the offering. 
@@ -56167,13 +58156,12 @@ func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostRese return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseHostReservationResult type PurchaseHostReservationOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier you provide to ensure idempotency of the // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide + // in the Amazon Elastic Compute Cloud User Guide. ClientToken *string `locationName:"clientToken" type:"string"` // The currency in which the totalUpfrontPrice and totalHourlyPrice amounts @@ -56186,8 +58174,7 @@ type PurchaseHostReservationOutput struct { // The total hourly price of the reservation calculated per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` - // The total amount that will be charged to your account when you purchase the - // reservation. + // The total amount charged to your account when you purchase the reservation. TotalUpfrontPrice *string `locationName:"totalUpfrontPrice" type:"string"` } @@ -56232,7 +58219,6 @@ func (s *PurchaseHostReservationOutput) SetTotalUpfrontPrice(v string) *Purchase } // Describes a request to purchase Scheduled Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseRequest type PurchaseRequest struct { _ struct{} `type:"structure"` @@ -56286,7 +58272,6 @@ func (s *PurchaseRequest) SetPurchaseToken(v string) *PurchaseRequest { } // Contains the parameters for PurchaseReservedInstancesOffering. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseReservedInstancesOfferingRequest type PurchaseReservedInstancesOfferingInput struct { _ struct{} `type:"structure"` @@ -56363,7 +58348,6 @@ func (s *PurchaseReservedInstancesOfferingInput) SetReservedInstancesOfferingId( } // Contains the output of PurchaseReservedInstancesOffering. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseReservedInstancesOfferingResult type PurchaseReservedInstancesOfferingOutput struct { _ struct{} `type:"structure"` @@ -56388,7 +58372,6 @@ func (s *PurchaseReservedInstancesOfferingOutput) SetReservedInstancesId(v strin } // Contains the parameters for PurchaseScheduledInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseScheduledInstancesRequest type PurchaseScheduledInstancesInput struct { _ struct{} `type:"structure"` @@ -56463,7 +58446,6 @@ func (s *PurchaseScheduledInstancesInput) SetPurchaseRequests(v []*PurchaseReque } // Contains the output of PurchaseScheduledInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseScheduledInstancesResult type PurchaseScheduledInstancesOutput struct { _ struct{} `type:"structure"` @@ -56488,7 +58470,6 @@ func (s *PurchaseScheduledInstancesOutput) SetScheduledInstanceSet(v []*Schedule } // Contains the parameters for RebootInstances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RebootInstancesRequest type RebootInstancesInput struct { _ struct{} `type:"structure"` @@ -56539,7 +58520,6 @@ func (s *RebootInstancesInput) SetInstanceIds(v []*string) *RebootInstancesInput return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RebootInstancesOutput type RebootInstancesOutput struct { _ struct{} `type:"structure"` } @@ -56555,7 +58535,6 @@ func (s RebootInstancesOutput) GoString() string { } // Describes a recurring charge. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RecurringCharge type RecurringCharge struct { _ struct{} `type:"structure"` @@ -56589,7 +58568,6 @@ func (s *RecurringCharge) SetFrequency(v string) *RecurringCharge { } // Describes a region. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Region type Region struct { _ struct{} `type:"structure"` @@ -56623,7 +58601,6 @@ func (s *Region) SetRegionName(v string) *Region { } // Contains the parameters for RegisterImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterImageRequest type RegisterImageInput struct { _ struct{} `type:"structure"` @@ -56796,7 +58773,6 @@ func (s *RegisterImageInput) SetVirtualizationType(v string) *RegisterImageInput } // Contains the output of RegisterImage. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterImageResult type RegisterImageOutput struct { _ struct{} `type:"structure"` @@ -56820,7 +58796,6 @@ func (s *RegisterImageOutput) SetImageId(v string) *RegisterImageOutput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcEndpointConnectionsRequest type RejectVpcEndpointConnectionsInput struct { _ struct{} `type:"structure"` @@ -56885,7 +58860,6 @@ func (s *RejectVpcEndpointConnectionsInput) SetVpcEndpointIds(v []*string) *Reje return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcEndpointConnectionsResult type RejectVpcEndpointConnectionsOutput struct { _ struct{} `type:"structure"` @@ -56910,7 +58884,6 @@ func (s *RejectVpcEndpointConnectionsOutput) SetUnsuccessful(v []*UnsuccessfulIt } // Contains the parameters for RejectVpcPeeringConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcPeeringConnectionRequest type RejectVpcPeeringConnectionInput struct { _ struct{} `type:"structure"` @@ -56962,7 +58935,6 @@ func (s *RejectVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *R } // Contains the output of RejectVpcPeeringConnection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcPeeringConnectionResult type RejectVpcPeeringConnectionOutput struct { _ struct{} `type:"structure"` @@ -56987,7 +58959,6 @@ func (s *RejectVpcPeeringConnectionOutput) SetReturn(v bool) *RejectVpcPeeringCo } // Contains the parameters for ReleaseAddress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseAddressRequest type ReleaseAddressInput struct { _ struct{} `type:"structure"` @@ -57032,7 +59003,6 @@ func (s *ReleaseAddressInput) SetPublicIp(v string) *ReleaseAddressInput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseAddressOutput type ReleaseAddressOutput struct { _ struct{} `type:"structure"` } @@ -57048,11 +59018,10 @@ func (s ReleaseAddressOutput) GoString() string { } // Contains the parameters for ReleaseHosts. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseHostsRequest type ReleaseHostsInput struct { _ struct{} `type:"structure"` - // The IDs of the Dedicated Hosts you want to release. + // The IDs of the Dedicated Hosts to release. // // HostIds is a required field HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` @@ -57088,7 +59057,6 @@ func (s *ReleaseHostsInput) SetHostIds(v []*string) *ReleaseHostsInput { } // Contains the output of ReleaseHosts. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseHostsResult type ReleaseHostsOutput struct { _ struct{} `type:"structure"` @@ -57122,7 +59090,6 @@ func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHost return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociationRequest type ReplaceIamInstanceProfileAssociationInput struct { _ struct{} `type:"structure"` @@ -57175,7 +59142,6 @@ func (s *ReplaceIamInstanceProfileAssociationInput) SetIamInstanceProfile(v *Iam return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociationResult type ReplaceIamInstanceProfileAssociationOutput struct { _ struct{} `type:"structure"` @@ -57200,7 +59166,6 @@ func (s *ReplaceIamInstanceProfileAssociationOutput) SetIamInstanceProfileAssoci } // Contains the parameters for ReplaceNetworkAclAssociation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclAssociationRequest type ReplaceNetworkAclAssociationInput struct { _ struct{} `type:"structure"` @@ -57267,7 +59232,6 @@ func (s *ReplaceNetworkAclAssociationInput) SetNetworkAclId(v string) *ReplaceNe } // Contains the output of ReplaceNetworkAclAssociation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclAssociationResult type ReplaceNetworkAclAssociationOutput struct { _ struct{} `type:"structure"` @@ -57292,7 +59256,6 @@ func (s *ReplaceNetworkAclAssociationOutput) SetNewAssociationId(v string) *Repl } // Contains the parameters for ReplaceNetworkAclEntry. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclEntryRequest type ReplaceNetworkAclEntryInput struct { _ struct{} `type:"structure"` @@ -57331,10 +59294,10 @@ type ReplaceNetworkAclEntryInput struct { // The IP protocol. You can specify all or -1 to mean all protocols. If you // specify all, -1, or a protocol number other than tcp, udp, or icmp, traffic // on all ports is allowed, regardless of any ports or ICMP types or codes you - // specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, - // traffic for all ICMP types and codes allowed, regardless of any that you - // specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, - // you must specify an ICMP type and code. + // that specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR + // block, traffic for all ICMP types and codes allowed, regardless of any that + // you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR + // block, you must specify an ICMP type and code. 
// // Protocol is a required field Protocol *string `locationName:"protocol" type:"string" required:"true"` @@ -57445,7 +59408,6 @@ func (s *ReplaceNetworkAclEntryInput) SetRuleNumber(v int64) *ReplaceNetworkAclE return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclEntryOutput type ReplaceNetworkAclEntryOutput struct { _ struct{} `type:"structure"` } @@ -57461,16 +59423,15 @@ func (s ReplaceNetworkAclEntryOutput) GoString() string { } // Contains the parameters for ReplaceRoute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteRequest type ReplaceRouteInput struct { _ struct{} `type:"structure"` - // The IPv4 CIDR address block used for the destination match. The value you - // provide must match the CIDR of an existing route in the table. + // The IPv4 CIDR address block used for the destination match. The value that + // you provide must match the CIDR of an existing route in the table. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` - // The IPv6 CIDR address block used for the destination match. The value you - // provide must match the CIDR of an existing route in the table. + // The IPv6 CIDR address block used for the destination match. The value that + // you provide must match the CIDR of an existing route in the table. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` // Checks whether you have the required permissions for the action, without @@ -57479,10 +59440,10 @@ type ReplaceRouteInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // [IPv6 traffic only] The ID of an egress-only Internet gateway. + // [IPv6 traffic only] The ID of an egress-only internet gateway. EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` - // The ID of an Internet gateway or virtual private gateway. + // The ID of an internet gateway or virtual private gateway. GatewayId *string `locationName:"gatewayId" type:"string"` // The ID of a NAT instance in your VPC. @@ -57586,7 +59547,6 @@ func (s *ReplaceRouteInput) SetVpcPeeringConnectionId(v string) *ReplaceRouteInp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteOutput type ReplaceRouteOutput struct { _ struct{} `type:"structure"` } @@ -57602,7 +59562,6 @@ func (s ReplaceRouteOutput) GoString() string { } // Contains the parameters for ReplaceRouteTableAssociation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteTableAssociationRequest type ReplaceRouteTableAssociationInput struct { _ struct{} `type:"structure"` @@ -57668,7 +59627,6 @@ func (s *ReplaceRouteTableAssociationInput) SetRouteTableId(v string) *ReplaceRo } // Contains the output of ReplaceRouteTableAssociation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteTableAssociationResult type ReplaceRouteTableAssociationOutput struct { _ struct{} `type:"structure"` @@ -57693,7 +59651,6 @@ func (s *ReplaceRouteTableAssociationOutput) SetNewAssociationId(v string) *Repl } // Contains the parameters for ReportInstanceStatus. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReportInstanceStatusRequest type ReportInstanceStatusInput struct { _ struct{} `type:"structure"` @@ -57707,7 +59664,7 @@ type ReportInstanceStatusInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // The time at which the reported instance health state ended. - EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + EndTime *time.Time `locationName:"endTime" type:"timestamp"` // One or more instances. // @@ -57741,7 +59698,7 @@ type ReportInstanceStatusInput struct { ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"` // The time at which the reported instance health state began. - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + StartTime *time.Time `locationName:"startTime" type:"timestamp"` // The status of all instances listed. // @@ -57820,7 +59777,6 @@ func (s *ReportInstanceStatusInput) SetStatus(v string) *ReportInstanceStatusInp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReportInstanceStatusOutput type ReportInstanceStatusOutput struct { _ struct{} `type:"structure"` } @@ -57836,7 +59792,6 @@ func (s ReportInstanceStatusOutput) GoString() string { } // The information to include in the launch template. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestLaunchTemplateData type RequestLaunchTemplateData struct { _ struct{} `type:"structure"` @@ -57849,6 +59804,11 @@ type RequestLaunchTemplateData struct { // cannot be changed using this action. BlockDeviceMappings []*LaunchTemplateBlockDeviceMappingRequest `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + // The CPU options for the instance. For more information, see Optimizing CPU + // Options (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // in the Amazon Elastic Compute Cloud User Guide. + CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"` + // The credit option for CPU usage of the instance. Valid for T2 instances only. CreditSpecification *CreditSpecificationRequest `type:"structure"` @@ -57925,17 +59885,16 @@ type RequestLaunchTemplateData struct { // group ID and security name in the same request. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` - // The tags to apply to the resources during launch. You can tag instances and - // volumes. The specified tags are applied to all instances or volumes that - // are created during launch. + // The tags to apply to the resources during launch. You can only tag instances + // and volumes on launch. The specified tags are applied to all instances or + // volumes that are created during launch. To tag a resource after it has been + // created, see CreateTags. TagSpecifications []*LaunchTemplateTagSpecificationRequest `locationName:"TagSpecification" locationNameList:"LaunchTemplateTagSpecificationRequest" type:"list"` - // The user data to make available to the instance. For more information, see - // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // The Base64-encoded user data to make available to the instance. 
For more + // information, see Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - // (Windows). If you are using a command line tool, base64-encoding is performed - // for you and you can load the text from a file. Otherwise, you must provide - // base64-encoded text. + // (Windows). UserData *string `type:"string"` } @@ -57967,16 +59926,6 @@ func (s *RequestLaunchTemplateData) Validate() error { } } } - if s.NetworkInterfaces != nil { - for i, v := range s.NetworkInterfaces { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams)) - } - } - } if invalidParams.Len() > 0 { return invalidParams @@ -57990,6 +59939,12 @@ func (s *RequestLaunchTemplateData) SetBlockDeviceMappings(v []*LaunchTemplateBl return s } +// SetCpuOptions sets the CpuOptions field's value. +func (s *RequestLaunchTemplateData) SetCpuOptions(v *LaunchTemplateCpuOptionsRequest) *RequestLaunchTemplateData { + s.CpuOptions = v + return s +} + // SetCreditSpecification sets the CreditSpecification field's value. func (s *RequestLaunchTemplateData) SetCreditSpecification(v *CreditSpecificationRequest) *RequestLaunchTemplateData { s.CreditSpecification = v @@ -58105,7 +60060,6 @@ func (s *RequestLaunchTemplateData) SetUserData(v string) *RequestLaunchTemplate } // Contains the parameters for RequestSpotFleet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotFleetRequest type RequestSpotFleetInput struct { _ struct{} `type:"structure"` @@ -58162,7 +60116,6 @@ func (s *RequestSpotFleetInput) SetSpotFleetRequestConfig(v *SpotFleetRequestCon } // Contains the output of RequestSpotFleet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotFleetResponse type RequestSpotFleetOutput struct { _ struct{} `type:"structure"` @@ -58189,7 +60142,6 @@ func (s *RequestSpotFleetOutput) SetSpotFleetRequestId(v string) *RequestSpotFle } // Contains the parameters for RequestSpotInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotInstancesRequest type RequestSpotInstancesInput struct { _ struct{} `type:"structure"` @@ -58223,13 +60175,13 @@ type RequestSpotInstancesInput struct { // for termination and provides a Spot Instance termination notice, which gives // the instance a two-minute warning before it terminates. // - // Note that you can't specify an Availability Zone group or a launch group - // if you specify a duration. + // You can't specify an Availability Zone group or a launch group if you specify + // a duration. BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide for Linux Instances. ClientToken *string `locationName:"clientToken" type:"string"` // Checks whether you have the required permissions for the action, without @@ -58269,14 +60221,14 @@ type RequestSpotInstancesInput struct { // launch, the request expires, or the request is canceled. 
If the request is // persistent, the request becomes active at this date and time and remains // active until it expires or is canceled. - ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` // The end date of the request. If this is a one-time request, the request remains // active until all instances launch, the request is canceled, or this date // is reached. If the request is persistent, it remains active until it is canceled // or this date is reached. The default end date is 7 days from the current // date. - ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } // String returns the string representation @@ -58377,7 +60329,6 @@ func (s *RequestSpotInstancesInput) SetValidUntil(v time.Time) *RequestSpotInsta } // Contains the output of RequestSpotInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotInstancesResult type RequestSpotInstancesOutput struct { _ struct{} `type:"structure"` @@ -58402,7 +60353,6 @@ func (s *RequestSpotInstancesOutput) SetSpotInstanceRequests(v []*SpotInstanceRe } // Describes the launch specification for an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotLaunchSpecification type RequestSpotLaunchSpecification struct { _ struct{} `type:"structure"` @@ -58465,9 +60415,7 @@ type RequestSpotLaunchSpecification struct { // The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` - // The user data to make available to the instances. If you are using an AWS - // SDK or command line tool, Base64-encoding is performed for you, and you can - // load the text from a file. Otherwise, you must provide Base64-encoded text. + // The Base64-encoded user data for the instance. UserData *string `locationName:"userData" type:"string"` } @@ -58489,16 +60437,6 @@ func (s *RequestSpotLaunchSpecification) Validate() error { invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) } } - if s.NetworkInterfaces != nil { - for i, v := range s.NetworkInterfaces { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams)) - } - } - } if invalidParams.Len() > 0 { return invalidParams @@ -58603,7 +60541,6 @@ func (s *RequestSpotLaunchSpecification) SetUserData(v string) *RequestSpotLaunc } // Describes a reservation. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Reservation type Reservation struct { _ struct{} `type:"structure"` @@ -58665,7 +60602,6 @@ func (s *Reservation) SetReservationId(v string) *Reservation { } // The cost associated with the Reserved Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservationValue type ReservationValue struct { _ struct{} `type:"structure"` @@ -58709,7 +60645,6 @@ func (s *ReservationValue) SetRemainingUpfrontValue(v string) *ReservationValue } // Describes the limit price of a Reserved Instance offering. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstanceLimitPrice type ReservedInstanceLimitPrice struct { _ struct{} `type:"structure"` @@ -58745,7 +60680,6 @@ func (s *ReservedInstanceLimitPrice) SetCurrencyCode(v string) *ReservedInstance } // The total value of the Convertible Reserved Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstanceReservationValue type ReservedInstanceReservationValue struct { _ struct{} `type:"structure"` @@ -58779,7 +60713,6 @@ func (s *ReservedInstanceReservationValue) SetReservedInstanceId(v string) *Rese } // Describes a Reserved Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstances type ReservedInstances struct { _ struct{} `type:"structure"` @@ -58794,7 +60727,7 @@ type ReservedInstances struct { Duration *int64 `locationName:"duration" type:"long"` // The time when the Reserved Instance expires. - End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` + End *time.Time `locationName:"end" type:"timestamp"` // The purchase price of the Reserved Instance. FixedPrice *float64 `locationName:"fixedPrice" type:"float"` @@ -58827,7 +60760,7 @@ type ReservedInstances struct { Scope *string `locationName:"scope" type:"string" enum:"scope"` // The date and time the Reserved Instance started. - Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` + Start *time.Time `locationName:"start" type:"timestamp"` // The state of the Reserved Instance purchase. State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` @@ -58958,7 +60891,6 @@ func (s *ReservedInstances) SetUsagePrice(v float64) *ReservedInstances { } // Describes the configuration settings for the modified Reserved Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstancesConfiguration type ReservedInstancesConfiguration struct { _ struct{} `type:"structure"` @@ -59021,7 +60953,6 @@ func (s *ReservedInstancesConfiguration) SetScope(v string) *ReservedInstancesCo } // Describes the ID of a Reserved Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstancesId type ReservedInstancesId struct { _ struct{} `type:"structure"` @@ -59046,7 +60977,6 @@ func (s *ReservedInstancesId) SetReservedInstancesId(v string) *ReservedInstance } // Describes a Reserved Instance listing. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstancesListing type ReservedInstancesListing struct { _ struct{} `type:"structure"` @@ -59055,7 +60985,7 @@ type ReservedInstancesListing struct { ClientToken *string `locationName:"clientToken" type:"string"` // The time the listing was created. - CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` // The number of instances in this state. InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"` @@ -59080,7 +61010,7 @@ type ReservedInstancesListing struct { Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` // The last modified timestamp of the listing. 
- UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp"` } // String returns the string representation @@ -59154,7 +61084,6 @@ func (s *ReservedInstancesListing) SetUpdateDate(v time.Time) *ReservedInstances } // Describes a Reserved Instance modification. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstancesModification type ReservedInstancesModification struct { _ struct{} `type:"structure"` @@ -59163,10 +61092,10 @@ type ReservedInstancesModification struct { ClientToken *string `locationName:"clientToken" type:"string"` // The time when the modification request was created. - CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` // The time for the modification to become effective. - EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"` + EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp"` // Contains target configurations along with their corresponding new Reserved // Instance IDs. @@ -59185,7 +61114,7 @@ type ReservedInstancesModification struct { StatusMessage *string `locationName:"statusMessage" type:"string"` // The time when the modification request was last updated. - UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp"` } // String returns the string representation @@ -59253,7 +61182,6 @@ func (s *ReservedInstancesModification) SetUpdateDate(v time.Time) *ReservedInst } // Describes the modification request/s. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstancesModificationResult type ReservedInstancesModificationResult struct { _ struct{} `type:"structure"` @@ -59289,7 +61217,6 @@ func (s *ReservedInstancesModificationResult) SetTargetConfiguration(v *Reserved } // Describes a Reserved Instance offering. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReservedInstancesOffering type ReservedInstancesOffering struct { _ struct{} `type:"structure"` @@ -59447,7 +61374,6 @@ func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesO return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeRequest type ResetFpgaImageAttributeInput struct { _ struct{} `type:"structure"` @@ -59507,7 +61433,6 @@ func (s *ResetFpgaImageAttributeInput) SetFpgaImageId(v string) *ResetFpgaImageA return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeResult type ResetFpgaImageAttributeOutput struct { _ struct{} `type:"structure"` @@ -59532,7 +61457,6 @@ func (s *ResetFpgaImageAttributeOutput) SetReturn(v bool) *ResetFpgaImageAttribu } // Contains the parameters for ResetImageAttribute. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttributeRequest type ResetImageAttributeInput struct { _ struct{} `type:"structure"` @@ -59598,7 +61522,6 @@ func (s *ResetImageAttributeInput) SetImageId(v string) *ResetImageAttributeInpu return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttributeOutput type ResetImageAttributeOutput struct { _ struct{} `type:"structure"` } @@ -59614,7 +61537,6 @@ func (s ResetImageAttributeOutput) GoString() string { } // Contains the parameters for ResetInstanceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetInstanceAttributeRequest type ResetInstanceAttributeInput struct { _ struct{} `type:"structure"` @@ -59682,7 +61604,6 @@ func (s *ResetInstanceAttributeInput) SetInstanceId(v string) *ResetInstanceAttr return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetInstanceAttributeOutput type ResetInstanceAttributeOutput struct { _ struct{} `type:"structure"` } @@ -59698,7 +61619,6 @@ func (s ResetInstanceAttributeOutput) GoString() string { } // Contains the parameters for ResetNetworkInterfaceAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetNetworkInterfaceAttributeRequest type ResetNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` @@ -59758,7 +61678,6 @@ func (s *ResetNetworkInterfaceAttributeInput) SetSourceDestCheck(v string) *Rese return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetNetworkInterfaceAttributeOutput type ResetNetworkInterfaceAttributeOutput struct { _ struct{} `type:"structure"` } @@ -59774,7 +61693,6 @@ func (s ResetNetworkInterfaceAttributeOutput) GoString() string { } // Contains the parameters for ResetSnapshotAttribute. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetSnapshotAttributeRequest type ResetSnapshotAttributeInput struct { _ struct{} `type:"structure"` @@ -59840,7 +61758,6 @@ func (s *ResetSnapshotAttributeInput) SetSnapshotId(v string) *ResetSnapshotAttr return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetSnapshotAttributeOutput type ResetSnapshotAttributeOutput struct { _ struct{} `type:"structure"` } @@ -59857,7 +61774,6 @@ func (s ResetSnapshotAttributeOutput) GoString() string { // Describes the error that's returned when you cannot delete a launch template // version. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResponseError type ResponseError struct { _ struct{} `type:"structure"` @@ -59891,13 +61807,17 @@ func (s *ResponseError) SetMessage(v string) *ResponseError { } // The information for a launch template. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResponseLaunchTemplateData type ResponseLaunchTemplateData struct { _ struct{} `type:"structure"` // The block device mappings. BlockDeviceMappings []*LaunchTemplateBlockDeviceMapping `locationName:"blockDeviceMappingSet" locationNameList:"item" type:"list"` + // The CPU options for the instance. For more information, see Optimizing CPU + // Options (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // in the Amazon Elastic Compute Cloud User Guide. + CpuOptions *LaunchTemplateCpuOptions `locationName:"cpuOptions" type:"structure"` + // The credit option for CPU usage of the instance. 
CreditSpecification *CreditSpecification `locationName:"creditSpecification" type:"structure"` @@ -59974,6 +61894,12 @@ func (s *ResponseLaunchTemplateData) SetBlockDeviceMappings(v []*LaunchTemplateB return s } +// SetCpuOptions sets the CpuOptions field's value. +func (s *ResponseLaunchTemplateData) SetCpuOptions(v *LaunchTemplateCpuOptions) *ResponseLaunchTemplateData { + s.CpuOptions = v + return s +} + // SetCreditSpecification sets the CreditSpecification field's value. func (s *ResponseLaunchTemplateData) SetCreditSpecification(v *CreditSpecification) *ResponseLaunchTemplateData { s.CreditSpecification = v @@ -60089,7 +62015,6 @@ func (s *ResponseLaunchTemplateData) SetUserData(v string) *ResponseLaunchTempla } // Contains the parameters for RestoreAddressToClassic. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreAddressToClassicRequest type RestoreAddressToClassicInput struct { _ struct{} `type:"structure"` @@ -60141,7 +62066,6 @@ func (s *RestoreAddressToClassicInput) SetPublicIp(v string) *RestoreAddressToCl } // Contains the output of RestoreAddressToClassic. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreAddressToClassicResult type RestoreAddressToClassicOutput struct { _ struct{} `type:"structure"` @@ -60175,7 +62099,6 @@ func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToCla } // Contains the parameters for RevokeSecurityGroupEgress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupEgressRequest type RevokeSecurityGroupEgressInput struct { _ struct{} `type:"structure"` @@ -60293,7 +62216,6 @@ func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroup return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupEgressOutput type RevokeSecurityGroupEgressOutput struct { _ struct{} `type:"structure"` } @@ -60309,7 +62231,6 @@ func (s RevokeSecurityGroupEgressOutput) GoString() string { } // Contains the parameters for RevokeSecurityGroupIngress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupIngressRequest type RevokeSecurityGroupIngressInput struct { _ struct{} `type:"structure"` @@ -60435,7 +62356,6 @@ func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGrou return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupIngressOutput type RevokeSecurityGroupIngressOutput struct { _ struct{} `type:"structure"` } @@ -60451,7 +62371,6 @@ func (s RevokeSecurityGroupIngressOutput) GoString() string { } // Describes a route in a route table. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Route type Route struct { _ struct{} `type:"structure"` @@ -60464,7 +62383,7 @@ type Route struct { // The prefix of the AWS service. DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"` - // The ID of the egress-only Internet gateway. + // The ID of the egress-only internet gateway. EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` // The ID of a gateway attached to your VPC. @@ -60584,7 +62503,6 @@ func (s *Route) SetVpcPeeringConnectionId(v string) *Route { } // Describes a route table. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RouteTable type RouteTable struct { _ struct{} `type:"structure"` @@ -60654,7 +62572,6 @@ func (s *RouteTable) SetVpcId(v string) *RouteTable { } // Describes an association between a route table and a subnet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RouteTableAssociation type RouteTableAssociation struct { _ struct{} `type:"structure"` @@ -60706,7 +62623,6 @@ func (s *RouteTableAssociation) SetSubnetId(v string) *RouteTableAssociation { } // Contains the parameters for RunInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunInstancesRequest type RunInstancesInput struct { _ struct{} `type:"structure"` @@ -60725,6 +62641,11 @@ type RunInstancesInput struct { // Constraints: Maximum 64 ASCII characters ClientToken *string `locationName:"clientToken" type:"string"` + // The CPU options for the instance. For more information, see Optimizing CPU + // Options (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // in the Amazon Elastic Compute Cloud User Guide. + CpuOptions *CpuOptionsRequest `type:"structure"` + // The credit option for CPU usage of the instance. Valid values are standard // and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. // For more information, see T2 Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-instances.html) @@ -60763,7 +62684,9 @@ type RunInstancesInput struct { // The IAM instance profile. IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` - // The ID of the AMI, which you can get by calling DescribeImages. + // The ID of the AMI, which you can get by calling DescribeImages. An AMI is + // required to launch an instance and must be specified here or in a launch + // template. ImageId *string `type:"string"` // Indicates whether an instance stops or terminates when you initiate shutdown @@ -60773,6 +62696,9 @@ type RunInstancesInput struct { InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` // The market (purchasing) option for the instances. + // + // For RunInstances, persistent Spot Instance requests are only supported when + // InstanceInterruptionBehavior is set to either hibernate or stop. InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"` // The instance type. For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) @@ -60811,6 +62737,7 @@ type RunInstancesInput struct { // The launch template to use to launch the instances. Any parameters that you // specify in RunInstances override the same parameters in the launch template. + // You can specify either the name or ID of a launch template, but not both. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // The maximum number of instances to launch. If you specify more instances @@ -60876,9 +62803,10 @@ type RunInstancesInput struct { // [EC2-VPC] The ID of the subnet to launch the instance into. SubnetId *string `type:"string"` - // The tags to apply to the resources during launch. You can tag instances and - // volumes. The specified tags are applied to all instances or volumes that - // are created during launch. + // The tags to apply to the resources during launch. You can only tag instances + // and volumes on launch. 
The specified tags are applied to all instances or + // volumes that are created during launch. To tag a resource after it has been + // created, see CreateTags. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The user data to make available to the instance. For more information, see @@ -60929,16 +62857,6 @@ func (s *RunInstancesInput) Validate() error { invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) } } - if s.NetworkInterfaces != nil { - for i, v := range s.NetworkInterfaces { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams)) - } - } - } if invalidParams.Len() > 0 { return invalidParams @@ -60964,6 +62882,12 @@ func (s *RunInstancesInput) SetClientToken(v string) *RunInstancesInput { return s } +// SetCpuOptions sets the CpuOptions field's value. +func (s *RunInstancesInput) SetCpuOptions(v *CpuOptionsRequest) *RunInstancesInput { + s.CpuOptions = v + return s +} + // SetCreditSpecification sets the CreditSpecification field's value. func (s *RunInstancesInput) SetCreditSpecification(v *CreditSpecificationRequest) *RunInstancesInput { s.CreditSpecification = v @@ -61127,7 +63051,6 @@ func (s *RunInstancesInput) SetUserData(v string) *RunInstancesInput { } // Describes the monitoring of an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunInstancesMonitoringEnabled type RunInstancesMonitoringEnabled struct { _ struct{} `type:"structure"` @@ -61168,7 +63091,6 @@ func (s *RunInstancesMonitoringEnabled) SetEnabled(v bool) *RunInstancesMonitori } // Contains the parameters for RunScheduledInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunScheduledInstancesRequest type RunScheduledInstancesInput struct { _ struct{} `type:"structure"` @@ -61261,7 +63183,6 @@ func (s *RunScheduledInstancesInput) SetScheduledInstanceId(v string) *RunSchedu } // Contains the output of RunScheduledInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunScheduledInstancesResult type RunScheduledInstancesOutput struct { _ struct{} `type:"structure"` @@ -61287,7 +63208,6 @@ func (s *RunScheduledInstancesOutput) SetInstanceIdSet(v []*string) *RunSchedule // Describes the storage parameters for S3 and S3 buckets for an instance store-backed // AMI. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/S3Storage type S3Storage struct { _ struct{} `type:"structure"` @@ -61355,7 +63275,6 @@ func (s *S3Storage) SetUploadPolicySignature(v string) *S3Storage { } // Describes a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstance type ScheduledInstance struct { _ struct{} `type:"structure"` @@ -61363,7 +63282,7 @@ type ScheduledInstance struct { AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The date when the Scheduled Instance was purchased. - CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` // The hourly price for a single instance. HourlyPrice *string `locationName:"hourlyPrice" type:"string"` @@ -61378,13 +63297,13 @@ type ScheduledInstance struct { NetworkPlatform *string `locationName:"networkPlatform" type:"string"` // The time for the next schedule to start. 
- NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp"` // The platform (Linux/UNIX or Windows). Platform *string `locationName:"platform" type:"string"` // The time that the previous schedule ended or will end. - PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp" timestampFormat:"iso8601"` + PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp"` // The schedule recurrence. Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` @@ -61396,10 +63315,10 @@ type ScheduledInstance struct { SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` // The end date for the Scheduled Instance. - TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp" timestampFormat:"iso8601"` + TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp"` // The start date for the Scheduled Instance. - TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp" timestampFormat:"iso8601"` + TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp"` // The total number of hours for a single instance for the entire term. TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` @@ -61506,7 +63425,6 @@ func (s *ScheduledInstance) SetTotalScheduledInstanceHours(v int64) *ScheduledIn } // Describes a schedule that is available for your Scheduled Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstanceAvailability type ScheduledInstanceAvailability struct { _ struct{} `type:"structure"` @@ -61517,7 +63435,7 @@ type ScheduledInstanceAvailability struct { AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` // The time period for the first schedule to start. - FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp"` // The hourly price for a single instance. HourlyPrice *string `locationName:"hourlyPrice" type:"string"` @@ -61640,7 +63558,6 @@ func (s *ScheduledInstanceAvailability) SetTotalScheduledInstanceHours(v int64) } // Describes the recurring schedule for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstanceRecurrence type ScheduledInstanceRecurrence struct { _ struct{} `type:"structure"` @@ -61705,7 +63622,6 @@ func (s *ScheduledInstanceRecurrence) SetOccurrenceUnit(v string) *ScheduledInst } // Describes the recurring schedule for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstanceRecurrenceRequest type ScheduledInstanceRecurrenceRequest struct { _ struct{} `type:"structure"` @@ -61773,7 +63689,6 @@ func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceUnit(v string) *Schedu } // Describes a block device mapping for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesBlockDeviceMapping type ScheduledInstancesBlockDeviceMapping struct { _ struct{} `type:"structure"` @@ -61836,7 +63751,6 @@ func (s *ScheduledInstancesBlockDeviceMapping) SetVirtualName(v string) *Schedul } // Describes an EBS volume for a Scheduled Instance. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesEbs type ScheduledInstancesEbs struct { _ struct{} `type:"structure"` @@ -61925,7 +63839,6 @@ func (s *ScheduledInstancesEbs) SetVolumeType(v string) *ScheduledInstancesEbs { } // Describes an IAM instance profile for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesIamInstanceProfile type ScheduledInstancesIamInstanceProfile struct { _ struct{} `type:"structure"` @@ -61959,7 +63872,6 @@ func (s *ScheduledInstancesIamInstanceProfile) SetName(v string) *ScheduledInsta } // Describes an IPv6 address. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesIpv6Address type ScheduledInstancesIpv6Address struct { _ struct{} `type:"structure"` @@ -61988,7 +63900,6 @@ func (s *ScheduledInstancesIpv6Address) SetIpv6Address(v string) *ScheduledInsta // If you are launching the Scheduled Instance in EC2-VPC, you must specify // the ID of the subnet. You can specify the subnet using either SubnetId or // NetworkInterface. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesLaunchSpecification type ScheduledInstancesLaunchSpecification struct { _ struct{} `type:"structure"` @@ -62151,7 +64062,6 @@ func (s *ScheduledInstancesLaunchSpecification) SetUserData(v string) *Scheduled } // Describes whether monitoring is enabled for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesMonitoring type ScheduledInstancesMonitoring struct { _ struct{} `type:"structure"` @@ -62176,7 +64086,6 @@ func (s *ScheduledInstancesMonitoring) SetEnabled(v bool) *ScheduledInstancesMon } // Describes a network interface for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesNetworkInterface type ScheduledInstancesNetworkInterface struct { _ struct{} `type:"structure"` @@ -62305,7 +64214,6 @@ func (s *ScheduledInstancesNetworkInterface) SetSubnetId(v string) *ScheduledIns } // Describes the placement for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesPlacement type ScheduledInstancesPlacement struct { _ struct{} `type:"structure"` @@ -62339,7 +64247,6 @@ func (s *ScheduledInstancesPlacement) SetGroupName(v string) *ScheduledInstances } // Describes a private IPv4 address for a Scheduled Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ScheduledInstancesPrivateIpAddressConfig type ScheduledInstancesPrivateIpAddressConfig struct { _ struct{} `type:"structure"` @@ -62374,7 +64281,6 @@ func (s *ScheduledInstancesPrivateIpAddressConfig) SetPrivateIpAddress(v string) } // Describes a security group -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SecurityGroup type SecurityGroup struct { _ struct{} `type:"structure"` @@ -62462,7 +64368,6 @@ func (s *SecurityGroup) SetVpcId(v string) *SecurityGroup { } // Describes a security group. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SecurityGroupIdentifier type SecurityGroupIdentifier struct { _ struct{} `type:"structure"` @@ -62496,7 +64401,6 @@ func (s *SecurityGroupIdentifier) SetGroupName(v string) *SecurityGroupIdentifie } // Describes a VPC with a security group that references your security group. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SecurityGroupReference type SecurityGroupReference struct { _ struct{} `type:"structure"` @@ -62543,7 +64447,6 @@ func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGr } // Describes a service configuration for a VPC endpoint service. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ServiceConfiguration type ServiceConfiguration struct { _ struct{} `type:"structure"` @@ -62641,7 +64544,6 @@ func (s *ServiceConfiguration) SetServiceType(v []*ServiceTypeDetail) *ServiceCo } // Describes a VPC endpoint service. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ServiceDetail type ServiceDetail struct { _ struct{} `type:"structure"` @@ -62730,7 +64632,6 @@ func (s *ServiceDetail) SetVpcEndpointPolicySupported(v bool) *ServiceDetail { } // Describes the type of service for a VPC endpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ServiceTypeDetail type ServiceTypeDetail struct { _ struct{} `type:"structure"` @@ -62756,21 +64657,20 @@ func (s *ServiceTypeDetail) SetServiceType(v string) *ServiceTypeDetail { // Describes the time period for a Scheduled Instance to start its first schedule. // The time period must span less than one day. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SlotDateTimeRangeRequest type SlotDateTimeRangeRequest struct { _ struct{} `type:"structure"` // The earliest date and time, in UTC, for the Scheduled Instance to start. // // EarliestTime is a required field - EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + EarliestTime *time.Time `type:"timestamp" required:"true"` // The latest date and time, in UTC, for the Scheduled Instance to start. This // value must be later than or equal to the earliest date and at most three // months in the future. // // LatestTime is a required field - LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + LatestTime *time.Time `type:"timestamp" required:"true"` } // String returns the string representation @@ -62812,15 +64712,14 @@ func (s *SlotDateTimeRangeRequest) SetLatestTime(v time.Time) *SlotDateTimeRange } // Describes the time period for a Scheduled Instance to start its first schedule. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SlotStartTimeRangeRequest type SlotStartTimeRangeRequest struct { _ struct{} `type:"structure"` // The earliest date and time, in UTC, for the Scheduled Instance to start. - EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + EarliestTime *time.Time `type:"timestamp"` // The latest date and time, in UTC, for the Scheduled Instance to start. - LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LatestTime *time.Time `type:"timestamp"` } // String returns the string representation @@ -62846,7 +64745,6 @@ func (s *SlotStartTimeRangeRequest) SetLatestTime(v time.Time) *SlotStartTimeRan } // Describes a snapshot. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Snapshot type Snapshot struct { _ struct{} `type:"structure"` @@ -62885,7 +64783,7 @@ type Snapshot struct { SnapshotId *string `locationName:"snapshotId" type:"string"` // The time stamp when the snapshot was initiated. - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + StartTime *time.Time `locationName:"startTime" type:"timestamp"` // The snapshot state. 
State *string `locationName:"status" type:"string" enum:"SnapshotState"` @@ -63004,7 +64902,6 @@ func (s *Snapshot) SetVolumeSize(v int64) *Snapshot { } // Describes the snapshot created from the imported disk. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SnapshotDetail type SnapshotDetail struct { _ struct{} `type:"structure"` @@ -63110,7 +65007,6 @@ func (s *SnapshotDetail) SetUserBucket(v *UserBucketDetails) *SnapshotDetail { } // The disk container object for the import snapshot request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SnapshotDiskContainer type SnapshotDiskContainer struct { _ struct{} `type:"structure"` @@ -63119,7 +65015,7 @@ type SnapshotDiskContainer struct { // The format of the disk image being imported. // - // Valid values: RAW | VHD | VMDK | OVA + // Valid values: VHD | VMDK | OVA Format *string `type:"string"` // The URL to the Amazon S3-based disk image being imported. It can either be @@ -63165,7 +65061,6 @@ func (s *SnapshotDiskContainer) SetUserBucket(v *UserBucket) *SnapshotDiskContai } // Details about the import snapshot task. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SnapshotTaskDetail type SnapshotTaskDetail struct { _ struct{} `type:"structure"` @@ -63262,7 +65157,6 @@ func (s *SnapshotTaskDetail) SetUserBucket(v *UserBucketDetails) *SnapshotTaskDe } // Describes the data feed for a Spot Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotDatafeedSubscription type SpotDatafeedSubscription struct { _ struct{} `type:"structure"` @@ -63323,7 +65217,6 @@ func (s *SpotDatafeedSubscription) SetState(v string) *SpotDatafeedSubscription } // Describes the launch specification for one or more Spot Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetLaunchSpecification type SpotFleetLaunchSpecification struct { _ struct{} `type:"structure"` @@ -63391,9 +65284,7 @@ type SpotFleetLaunchSpecification struct { // The tags to apply during creation. TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"` - // The user data to make available to the instances. If you are using an AWS - // SDK or command line tool, Base64-encoding is performed for you, and you can - // load the text from a file. Otherwise, you must provide Base64-encoded text. + // The Base64-encoded user data to make available to the instances. UserData *string `locationName:"userData" type:"string"` // The number of units provided by the specified instance type. These are the @@ -63416,26 +65307,6 @@ func (s SpotFleetLaunchSpecification) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *SpotFleetLaunchSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SpotFleetLaunchSpecification"} - if s.NetworkInterfaces != nil { - for i, v := range s.NetworkInterfaces { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetAddressingType sets the AddressingType field's value. 
func (s *SpotFleetLaunchSpecification) SetAddressingType(v string) *SpotFleetLaunchSpecification { s.AddressingType = &v @@ -63545,7 +65416,6 @@ func (s *SpotFleetLaunchSpecification) SetWeightedCapacity(v float64) *SpotFleet } // Describes whether monitoring is enabled. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetMonitoring type SpotFleetMonitoring struct { _ struct{} `type:"structure"` @@ -63572,7 +65442,6 @@ func (s *SpotFleetMonitoring) SetEnabled(v bool) *SpotFleetMonitoring { } // Describes a Spot Fleet request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetRequestConfig type SpotFleetRequestConfig struct { _ struct{} `type:"structure"` @@ -63586,7 +65455,7 @@ type SpotFleetRequestConfig struct { // The creation date and time of the request. // // CreateTime is a required field - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` // The configuration of the Spot Fleet request. // @@ -63645,7 +65514,6 @@ func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRe } // Describes the configuration of a Spot Fleet request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetRequestConfigData type SpotFleetRequestConfigData struct { _ struct{} `type:"structure"` @@ -63653,8 +65521,8 @@ type SpotFleetRequestConfigData struct { // by the Spot Fleet request. The default is lowestPrice. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` - // A unique, case-sensitive identifier you provide to ensure idempotency of - // your listings. This helps avoid duplicate listings. For more information, + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of your listings. This helps to avoid duplicate listings. For more information, // see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` @@ -63677,6 +65545,12 @@ type SpotFleetRequestConfigData struct { // The behavior when a Spot Instance is interrupted. The default is terminate. InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet + // selects the cheapest Spot pools and evenly allocates your target Spot capacity + // across the number of Spot pools that you specify. + InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` + // The launch specifications for the Spot Fleet request. LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" type:"list"` @@ -63692,6 +65566,25 @@ type SpotFleetRequestConfigData struct { // HS1, M1, M2, M3, and T1. LoadBalancersConfig *LoadBalancersConfig `locationName:"loadBalancersConfig" type:"structure"` + // The order of the launch template overrides to use in fulfilling On-Demand + // capacity. If you specify lowestPrice, Spot Fleet uses price to determine + // the order, launching the lowest price first. 
If you specify prioritized, + // Spot Fleet uses the priority that you assign to each Spot Fleet launch template + // override, launching the highest priority first. If you do not specify a value, + // Spot Fleet defaults to lowestPrice. + OnDemandAllocationStrategy *string `locationName:"onDemandAllocationStrategy" type:"string" enum:"OnDemandAllocationStrategy"` + + // The number of On-Demand units fulfilled by this request compared to the set + // target On-Demand capacity. + OnDemandFulfilledCapacity *float64 `locationName:"onDemandFulfilledCapacity" type:"double"` + + // The number of On-Demand units to request. You can choose to set the target + // capacity in terms of instances or a performance characteristic that is important + // to your application workload, such as vCPUs, memory, or I/O. If the request + // type is maintain, you can specify a target capacity of 0 and add capacity + // later. + OnDemandTargetCapacity *int64 `locationName:"onDemandTargetCapacity" type:"integer"` + // Indicates whether Spot Fleet should replace unhealthy instances. ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` @@ -63712,24 +65605,23 @@ type SpotFleetRequestConfigData struct { // Fleet request expires. TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` - // The type of request. Indicates whether the fleet will only request the target - // capacity or also attempt to maintain it. When you request a certain target - // capacity, the fleet will only place the required requests. It will not attempt - // to replenish Spot Instances if capacity is diminished, nor will it submit - // requests in alternative Spot pools if capacity is not available. When you - // want to maintain a certain target capacity, fleet will place the required - // requests to meet this target capacity. It will also automatically replenish - // any interrupted instances. Default: maintain. + // The type of request. Indicates whether the Spot Fleet only requests the target + // capacity or also attempts to maintain it. When this value is request, the + // Spot Fleet only places the required requests. It does not attempt to replenish + // Spot Instances if capacity is diminished, nor does it submit requests in + // alternative Spot pools if capacity is not available. To maintain a certain + // target capacity, the Spot Fleet places the required requests to meet capacity + // and automatically replenishes any interrupted instances. Default: maintain. Type *string `locationName:"type" type:"string" enum:"FleetType"` // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // The default is to start fulfilling the request immediately. - ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // At this point, no new Spot Instance requests are placed or able to fulfill // the request. The default end date is 7 days from the current date. 
- ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } // String returns the string representation @@ -63751,16 +65643,6 @@ func (s *SpotFleetRequestConfigData) Validate() error { if s.TargetCapacity == nil { invalidParams.Add(request.NewErrParamRequired("TargetCapacity")) } - if s.LaunchSpecifications != nil { - for i, v := range s.LaunchSpecifications { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchSpecifications", i), err.(request.ErrInvalidParams)) - } - } - } if s.LaunchTemplateConfigs != nil { for i, v := range s.LaunchTemplateConfigs { if v == nil { @@ -63819,6 +65701,12 @@ func (s *SpotFleetRequestConfigData) SetInstanceInterruptionBehavior(v string) * return s } +// SetInstancePoolsToUseCount sets the InstancePoolsToUseCount field's value. +func (s *SpotFleetRequestConfigData) SetInstancePoolsToUseCount(v int64) *SpotFleetRequestConfigData { + s.InstancePoolsToUseCount = &v + return s +} + // SetLaunchSpecifications sets the LaunchSpecifications field's value. func (s *SpotFleetRequestConfigData) SetLaunchSpecifications(v []*SpotFleetLaunchSpecification) *SpotFleetRequestConfigData { s.LaunchSpecifications = v @@ -63837,6 +65725,24 @@ func (s *SpotFleetRequestConfigData) SetLoadBalancersConfig(v *LoadBalancersConf return s } +// SetOnDemandAllocationStrategy sets the OnDemandAllocationStrategy field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandAllocationStrategy(v string) *SpotFleetRequestConfigData { + s.OnDemandAllocationStrategy = &v + return s +} + +// SetOnDemandFulfilledCapacity sets the OnDemandFulfilledCapacity field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandFulfilledCapacity(v float64) *SpotFleetRequestConfigData { + s.OnDemandFulfilledCapacity = &v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandTargetCapacity(v int64) *SpotFleetRequestConfigData { + s.OnDemandTargetCapacity = &v + return s +} + // SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. func (s *SpotFleetRequestConfigData) SetReplaceUnhealthyInstances(v bool) *SpotFleetRequestConfigData { s.ReplaceUnhealthyInstances = &v @@ -63880,7 +65786,6 @@ func (s *SpotFleetRequestConfigData) SetValidUntil(v time.Time) *SpotFleetReques } // The tags for a Spot Fleet resource. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetTagSpecification type SpotFleetTagSpecification struct { _ struct{} `type:"structure"` @@ -63915,7 +65820,6 @@ func (s *SpotFleetTagSpecification) SetTags(v []*Tag) *SpotFleetTagSpecification } // Describes a Spot Instance request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotInstanceRequest type SpotInstanceRequest struct { _ struct{} `type:"structure"` @@ -63933,7 +65837,7 @@ type SpotInstanceRequest struct { // The date and time when the Spot Instance request was created, in UTC format // (for example, YYYY-MM-DDTHH:MM:SSZ). - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` // The fault codes for the Spot Instance request, if any. 
Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` @@ -63964,10 +65868,9 @@ type SpotInstanceRequest struct { // The maximum price per hour that you are willing to pay for a Spot Instance. SpotPrice *string `locationName:"spotPrice" type:"string"` - // The state of the Spot Instance request. Spot status information can help - // you track your Spot Instance requests. For more information, see Spot Status - // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) - // in the Amazon Elastic Compute Cloud User Guide. + // The state of the Spot Instance request. Spot status information helps track + // your Spot Instance requests. For more information, see Spot Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon EC2 User Guide for Linux Instances. State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` // The status code and status message describing the Spot Instance request. @@ -63981,14 +65884,14 @@ type SpotInstanceRequest struct { // The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // The request becomes active at this date and time. - ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // If this is a one-time request, it remains active until all instances launch, // the request is canceled, or this date is reached. If the request is persistent, // it remains active until it is canceled or this date is reached. The default // end date is 7 days from the current date. - ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } // String returns the string representation @@ -64116,7 +66019,6 @@ func (s *SpotInstanceRequest) SetValidUntil(v time.Time) *SpotInstanceRequest { } // Describes a Spot Instance state change. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotInstanceStateFault type SpotInstanceStateFault struct { _ struct{} `type:"structure"` @@ -64150,12 +66052,11 @@ func (s *SpotInstanceStateFault) SetMessage(v string) *SpotInstanceStateFault { } // Describes the status of a Spot Instance request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotInstanceStatus type SpotInstanceStatus struct { _ struct{} `type:"structure"` // The status code. For a list of status codes, see Spot Status Codes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide for Linux Instances. Code *string `locationName:"code" type:"string"` // The description for the status code. @@ -64163,7 +66064,7 @@ type SpotInstanceStatus struct { // The date and time of the most recent status update, in UTC format (for example, // YYYY-MM-DDTHH:MM:SSZ). - UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp"` } // String returns the string representation @@ -64195,7 +66096,6 @@ func (s *SpotInstanceStatus) SetUpdateTime(v time.Time) *SpotInstanceStatus { } // The options for Spot Instances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotMarketOptions type SpotMarketOptions struct { _ struct{} `type:"structure"` @@ -64211,7 +66111,9 @@ type SpotMarketOptions struct { // default is the On-Demand price. MaxPrice *string `type:"string"` - // The Spot Instance request type. + // The Spot Instance request type. For RunInstances, persistent Spot Instance + // requests are only supported when InstanceInterruptionBehavior is set to either + // hibernate or stop. SpotInstanceType *string `type:"string" enum:"SpotInstanceType"` // The end date of the request. For a one-time request, the request remains @@ -64219,7 +66121,7 @@ type SpotMarketOptions struct { // is reached. If the request is persistent, it remains active until it is canceled // or this date and time is reached. The default end date is 7 days from the // current date. - ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` + ValidUntil *time.Time `type:"timestamp"` } // String returns the string representation @@ -64262,8 +66164,99 @@ func (s *SpotMarketOptions) SetValidUntil(v time.Time) *SpotMarketOptions { return s } +// Describes the configuration of Spot Instances in an EC2 Fleet. +type SpotOptions struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target capacity across the Spot pools specified + // by the Spot Fleet request. The default is lowest-price. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"SpotInstanceInterruptionBehavior"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when AllocationStrategy is set to lowestPrice. EC2 Fleet selects + // the cheapest Spot pools and evenly allocates your target Spot capacity across + // the number of Spot pools that you specify. + InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` +} + +// String returns the string representation +func (s SpotOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotOptions) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *SpotOptions) SetAllocationStrategy(v string) *SpotOptions { + s.AllocationStrategy = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotOptions) SetInstanceInterruptionBehavior(v string) *SpotOptions { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetInstancePoolsToUseCount sets the InstancePoolsToUseCount field's value. +func (s *SpotOptions) SetInstancePoolsToUseCount(v int64) *SpotOptions { + s.InstancePoolsToUseCount = &v + return s +} + +// Describes the configuration of Spot Instances in an EC2 Fleet request. +type SpotOptionsRequest struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target capacity across the Spot pools specified + // by the Spot Fleet request. The default is lowestPrice. + AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. 
+ InstanceInterruptionBehavior *string `type:"string" enum:"SpotInstanceInterruptionBehavior"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet + // selects the cheapest Spot pools and evenly allocates your target Spot capacity + // across the number of Spot pools that you specify. + InstancePoolsToUseCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s SpotOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotOptionsRequest) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *SpotOptionsRequest) SetAllocationStrategy(v string) *SpotOptionsRequest { + s.AllocationStrategy = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotOptionsRequest) SetInstanceInterruptionBehavior(v string) *SpotOptionsRequest { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetInstancePoolsToUseCount sets the InstancePoolsToUseCount field's value. +func (s *SpotOptionsRequest) SetInstancePoolsToUseCount(v int64) *SpotOptionsRequest { + s.InstancePoolsToUseCount = &v + return s +} + // Describes Spot Instance placement. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotPlacement type SpotPlacement struct { _ struct{} `type:"structure"` @@ -64312,7 +66305,6 @@ func (s *SpotPlacement) SetTenancy(v string) *SpotPlacement { // Describes the maximum price per hour that you are willing to pay for a Spot // Instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotPrice type SpotPrice struct { _ struct{} `type:"structure"` @@ -64329,7 +66321,7 @@ type SpotPrice struct { SpotPrice *string `locationName:"spotPrice" type:"string"` // The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` } // String returns the string representation @@ -64373,7 +66365,6 @@ func (s *SpotPrice) SetTimestamp(v time.Time) *SpotPrice { } // Describes a stale rule in a security group. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StaleIpPermission type StaleIpPermission struct { _ struct{} `type:"structure"` @@ -64448,7 +66439,6 @@ func (s *StaleIpPermission) SetUserIdGroupPairs(v []*UserIdGroupPair) *StaleIpPe } // Describes a stale security group (a security group that contains stale rules). -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StaleSecurityGroup type StaleSecurityGroup struct { _ struct{} `type:"structure"` @@ -64520,7 +66510,6 @@ func (s *StaleSecurityGroup) SetVpcId(v string) *StaleSecurityGroup { } // Contains the parameters for StartInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartInstancesRequest type StartInstancesInput struct { _ struct{} `type:"structure"` @@ -64581,7 +66570,6 @@ func (s *StartInstancesInput) SetInstanceIds(v []*string) *StartInstancesInput { } // Contains the output of StartInstances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartInstancesResult type StartInstancesOutput struct { _ struct{} `type:"structure"` @@ -64606,7 +66594,6 @@ func (s *StartInstancesOutput) SetStartingInstances(v []*InstanceStateChange) *S } // Describes a state change. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StateReason type StateReason struct { _ struct{} `type:"structure"` @@ -64615,19 +66602,23 @@ type StateReason struct { // The message for the state change. // - // * Server.InsufficientInstanceCapacity: There was insufficient instance - // capacity to satisfy the launch request. + // * Server.InsufficientInstanceCapacity: There was insufficient capacity + // available to satisfy the launch request. // - // * Server.InternalError: An internal error occurred during instance launch, - // resulting in termination. + // * Server.InternalError: An internal error caused the instance to terminate + // during launch. // // * Server.ScheduledStop: The instance was stopped due to a scheduled retirement. // - // * Server.SpotInstanceTermination: A Spot Instance was terminated due to - // an increase in the Spot price. + // * Server.SpotInstanceShutdown: The instance was stopped because the number + // of Spot requests with a maximum price equal to or higher than the Spot + // price exceeded available capacity or because of an increase in the Spot + // price. // - // * Client.InternalError: A client error caused the instance to terminate - // on launch. + // * Server.SpotInstanceTermination: The instance was terminated because + // the number of Spot requests with a maximum price equal to or higher than + // the Spot price exceeded available capacity or because of an increase in + // the Spot price. // // * Client.InstanceInitiatedShutdown: The instance was shut down using the // shutdown -h command from the instance. @@ -64635,14 +66626,17 @@ type StateReason struct { // * Client.InstanceTerminated: The instance was terminated or rebooted during // AMI creation. // + // * Client.InternalError: A client error caused the instance to terminate + // during launch. + // + // * Client.InvalidSnapshot.NotFound: The specified snapshot was not found. + // // * Client.UserInitiatedShutdown: The instance was shut down using the Amazon // EC2 API. // // * Client.VolumeLimitExceeded: The limit on the number of EBS volumes or // total storage was exceeded. Decrease usage or request an increase in your - // limits. - // - // * Client.InvalidSnapshot.NotFound: The specified snapshot was not found. + // account limits. Message *string `locationName:"message" type:"string"` } @@ -64669,7 +66663,6 @@ func (s *StateReason) SetMessage(v string) *StateReason { } // Contains the parameters for StopInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StopInstancesRequest type StopInstancesInput struct { _ struct{} `type:"structure"` @@ -64735,7 +66728,6 @@ func (s *StopInstancesInput) SetInstanceIds(v []*string) *StopInstancesInput { } // Contains the output of StopInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StopInstancesResult type StopInstancesOutput struct { _ struct{} `type:"structure"` @@ -64760,7 +66752,6 @@ func (s *StopInstancesOutput) SetStoppingInstances(v []*InstanceStateChange) *St } // Describes the storage location for an instance store-backed AMI. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Storage type Storage struct { _ struct{} `type:"structure"` @@ -64785,7 +66776,6 @@ func (s *Storage) SetS3(v *S3Storage) *Storage { } // Describes a storage location in Amazon S3. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StorageLocation type StorageLocation struct { _ struct{} `type:"structure"` @@ -64819,7 +66809,6 @@ func (s *StorageLocation) SetKey(v string) *StorageLocation { } // Describes a subnet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Subnet type Subnet struct { _ struct{} `type:"structure"` @@ -64830,8 +66819,8 @@ type Subnet struct { // The Availability Zone of the subnet. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The number of unused private IPv4 addresses in the subnet. Note that the - // IPv4 addresses for any stopped instances are considered unavailable. + // The number of unused private IPv4 addresses in the subnet. The IPv4 addresses + // for any stopped instances are considered unavailable. AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"` // The IPv4 CIDR block assigned to the subnet. @@ -64937,7 +66926,6 @@ func (s *Subnet) SetVpcId(v string) *Subnet { } // Describes the state of a CIDR block. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SubnetCidrBlockState type SubnetCidrBlockState struct { _ struct{} `type:"structure"` @@ -64971,7 +66959,6 @@ func (s *SubnetCidrBlockState) SetStatusMessage(v string) *SubnetCidrBlockState } // Describes an IPv6 CIDR block associated with a subnet. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SubnetIpv6CidrBlockAssociation type SubnetIpv6CidrBlockAssociation struct { _ struct{} `type:"structure"` @@ -65015,7 +67002,6 @@ func (s *SubnetIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *SubnetCidrBloc // Describes the T2 instance whose credit option for CPU usage was successfully // modified. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SuccessfulInstanceCreditSpecificationItem type SuccessfulInstanceCreditSpecificationItem struct { _ struct{} `type:"structure"` @@ -65040,7 +67026,6 @@ func (s *SuccessfulInstanceCreditSpecificationItem) SetInstanceId(v string) *Suc } // Describes a tag. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Tag type Tag struct { _ struct{} `type:"structure"` @@ -65080,7 +67065,6 @@ func (s *Tag) SetValue(v string) *Tag { } // Describes a tag. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TagDescription type TagDescription struct { _ struct{} `type:"structure"` @@ -65132,12 +67116,12 @@ func (s *TagDescription) SetValue(v string) *TagDescription { } // The tags to apply to a resource when the resource is being created. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TagSpecification type TagSpecification struct { _ struct{} `type:"structure"` // The type of resource to tag. Currently, the resource types that support tagging - // on creation are instance and volume. + // on creation are fleet, instance, snapshot, and volume. To tag a resource + // after it has been created, see CreateTags. ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` // The tags to apply to the resource. @@ -65166,8 +67150,132 @@ func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification { return s } +// The number of units to request. 
You can choose to set the target capacity +// in terms of instances or a performance characteristic that is important to +// your application workload, such as vCPUs, memory, or I/O. If the request +// type is maintain, you can specify a target capacity of 0 and add capacity +// later. +type TargetCapacitySpecification struct { + _ struct{} `type:"structure"` + + // The default TotalTargetCapacity, which is either Spot or On-Demand. + DefaultTargetCapacityType *string `locationName:"defaultTargetCapacityType" type:"string" enum:"DefaultTargetCapacityType"` + + // The number of On-Demand units to request. + OnDemandTargetCapacity *int64 `locationName:"onDemandTargetCapacity" type:"integer"` + + // The maximum number of Spot units to launch. + SpotTargetCapacity *int64 `locationName:"spotTargetCapacity" type:"integer"` + + // The number of units to request, filled using DefaultTargetCapacityType. + TotalTargetCapacity *int64 `locationName:"totalTargetCapacity" type:"integer"` +} + +// String returns the string representation +func (s TargetCapacitySpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetCapacitySpecification) GoString() string { + return s.String() +} + +// SetDefaultTargetCapacityType sets the DefaultTargetCapacityType field's value. +func (s *TargetCapacitySpecification) SetDefaultTargetCapacityType(v string) *TargetCapacitySpecification { + s.DefaultTargetCapacityType = &v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *TargetCapacitySpecification) SetOnDemandTargetCapacity(v int64) *TargetCapacitySpecification { + s.OnDemandTargetCapacity = &v + return s +} + +// SetSpotTargetCapacity sets the SpotTargetCapacity field's value. +func (s *TargetCapacitySpecification) SetSpotTargetCapacity(v int64) *TargetCapacitySpecification { + s.SpotTargetCapacity = &v + return s +} + +// SetTotalTargetCapacity sets the TotalTargetCapacity field's value. +func (s *TargetCapacitySpecification) SetTotalTargetCapacity(v int64) *TargetCapacitySpecification { + s.TotalTargetCapacity = &v + return s +} + +// The number of units to request. You can choose to set the target capacity +// in terms of instances or a performance characteristic that is important to +// your application workload, such as vCPUs, memory, or I/O. If the request +// type is maintain, you can specify a target capacity of 0 and add capacity +// later. +type TargetCapacitySpecificationRequest struct { + _ struct{} `type:"structure"` + + // The default TotalTargetCapacity, which is either Spot or On-Demand. + DefaultTargetCapacityType *string `type:"string" enum:"DefaultTargetCapacityType"` + + // The number of On-Demand units to request. + OnDemandTargetCapacity *int64 `type:"integer"` + + // The number of Spot units to request. + SpotTargetCapacity *int64 `type:"integer"` + + // The number of units to request, filled using DefaultTargetCapacityType. + // + // TotalTargetCapacity is a required field + TotalTargetCapacity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s TargetCapacitySpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetCapacitySpecificationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TargetCapacitySpecificationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetCapacitySpecificationRequest"} + if s.TotalTargetCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("TotalTargetCapacity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultTargetCapacityType sets the DefaultTargetCapacityType field's value. +func (s *TargetCapacitySpecificationRequest) SetDefaultTargetCapacityType(v string) *TargetCapacitySpecificationRequest { + s.DefaultTargetCapacityType = &v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *TargetCapacitySpecificationRequest) SetOnDemandTargetCapacity(v int64) *TargetCapacitySpecificationRequest { + s.OnDemandTargetCapacity = &v + return s +} + +// SetSpotTargetCapacity sets the SpotTargetCapacity field's value. +func (s *TargetCapacitySpecificationRequest) SetSpotTargetCapacity(v int64) *TargetCapacitySpecificationRequest { + s.SpotTargetCapacity = &v + return s +} + +// SetTotalTargetCapacity sets the TotalTargetCapacity field's value. +func (s *TargetCapacitySpecificationRequest) SetTotalTargetCapacity(v int64) *TargetCapacitySpecificationRequest { + s.TotalTargetCapacity = &v + return s +} + // Information about the Convertible Reserved Instance offering. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TargetConfiguration type TargetConfiguration struct { _ struct{} `type:"structure"` @@ -65202,7 +67310,6 @@ func (s *TargetConfiguration) SetOfferingId(v string) *TargetConfiguration { } // Details about the target configuration. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TargetConfigurationRequest type TargetConfigurationRequest struct { _ struct{} `type:"structure"` @@ -65252,7 +67359,6 @@ func (s *TargetConfigurationRequest) SetOfferingId(v string) *TargetConfiguratio } // Describes a load balancer target group. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TargetGroup type TargetGroup struct { _ struct{} `type:"structure"` @@ -65293,7 +67399,6 @@ func (s *TargetGroup) SetArn(v string) *TargetGroup { // Describes the target groups to attach to a Spot Fleet. Spot Fleet registers // the running Spot Instances with these target groups. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TargetGroupsConfig type TargetGroupsConfig struct { _ struct{} `type:"structure"` @@ -65346,7 +67451,6 @@ func (s *TargetGroupsConfig) SetTargetGroups(v []*TargetGroup) *TargetGroupsConf } // The total value of the new Convertible Reserved Instances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TargetReservationValue type TargetReservationValue struct { _ struct{} `type:"structure"` @@ -65383,7 +67487,6 @@ func (s *TargetReservationValue) SetTargetConfiguration(v *TargetConfiguration) } // Contains the parameters for TerminateInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateInstancesRequest type TerminateInstancesInput struct { _ struct{} `type:"structure"` @@ -65438,7 +67541,6 @@ func (s *TerminateInstancesInput) SetInstanceIds(v []*string) *TerminateInstance } // Contains the output of TerminateInstances. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateInstancesResult type TerminateInstancesOutput struct { _ struct{} `type:"structure"` @@ -65462,7 +67564,6 @@ func (s *TerminateInstancesOutput) SetTerminatingInstances(v []*InstanceStateCha return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignIpv6AddressesRequest type UnassignIpv6AddressesInput struct { _ struct{} `type:"structure"` @@ -65515,7 +67616,6 @@ func (s *UnassignIpv6AddressesInput) SetNetworkInterfaceId(v string) *UnassignIp return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignIpv6AddressesResult type UnassignIpv6AddressesOutput struct { _ struct{} `type:"structure"` @@ -65549,7 +67649,6 @@ func (s *UnassignIpv6AddressesOutput) SetUnassignedIpv6Addresses(v []*string) *U } // Contains the parameters for UnassignPrivateIpAddresses. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateIpAddressesRequest type UnassignPrivateIpAddressesInput struct { _ struct{} `type:"structure"` @@ -65603,7 +67702,6 @@ func (s *UnassignPrivateIpAddressesInput) SetPrivateIpAddresses(v []*string) *Un return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateIpAddressesOutput type UnassignPrivateIpAddressesOutput struct { _ struct{} `type:"structure"` } @@ -65619,7 +67717,6 @@ func (s UnassignPrivateIpAddressesOutput) GoString() string { } // Contains the parameters for UnmonitorInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnmonitorInstancesRequest type UnmonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -65671,7 +67768,6 @@ func (s *UnmonitorInstancesInput) SetInstanceIds(v []*string) *UnmonitorInstance } // Contains the output of UnmonitorInstances. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnmonitorInstancesResult type UnmonitorInstancesOutput struct { _ struct{} `type:"structure"` @@ -65696,7 +67792,6 @@ func (s *UnmonitorInstancesOutput) SetInstanceMonitorings(v []*InstanceMonitorin } // Describes the T2 instance whose credit option for CPU usage was not modified. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnsuccessfulInstanceCreditSpecificationItem type UnsuccessfulInstanceCreditSpecificationItem struct { _ struct{} `type:"structure"` @@ -65732,7 +67827,6 @@ func (s *UnsuccessfulInstanceCreditSpecificationItem) SetInstanceId(v string) *U // Information about the error for the T2 instance whose credit option for CPU // usage was not modified. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnsuccessfulInstanceCreditSpecificationItemError type UnsuccessfulInstanceCreditSpecificationItemError struct { _ struct{} `type:"structure"` @@ -65766,7 +67860,6 @@ func (s *UnsuccessfulInstanceCreditSpecificationItemError) SetMessage(v string) } // Information about items that were not successfully processed in a batch call. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnsuccessfulItem type UnsuccessfulItem struct { _ struct{} `type:"structure"` @@ -65803,7 +67896,6 @@ func (s *UnsuccessfulItem) SetResourceId(v string) *UnsuccessfulItem { // Information about the error that occurred. For more information about errors, // see Error Codes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnsuccessfulItemError type UnsuccessfulItemError struct { _ struct{} `type:"structure"` @@ -65841,7 +67933,6 @@ func (s *UnsuccessfulItemError) SetMessage(v string) *UnsuccessfulItemError { } // Contains the parameters for UpdateSecurityGroupRuleDescriptionsEgress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgressRequest type UpdateSecurityGroupRuleDescriptionsEgressInput struct { _ struct{} `type:"structure"` @@ -65914,7 +68005,6 @@ func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetIpPermissions(v []*I } // Contains the output of UpdateSecurityGroupRuleDescriptionsEgress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgressResult type UpdateSecurityGroupRuleDescriptionsEgressOutput struct { _ struct{} `type:"structure"` @@ -65939,7 +68029,6 @@ func (s *UpdateSecurityGroupRuleDescriptionsEgressOutput) SetReturn(v bool) *Upd } // Contains the parameters for UpdateSecurityGroupRuleDescriptionsIngress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngressRequest type UpdateSecurityGroupRuleDescriptionsIngressInput struct { _ struct{} `type:"structure"` @@ -66012,7 +68101,6 @@ func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetIpPermissions(v []* } // Contains the output of UpdateSecurityGroupRuleDescriptionsIngress. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngressResult type UpdateSecurityGroupRuleDescriptionsIngressOutput struct { _ struct{} `type:"structure"` @@ -66037,7 +68125,6 @@ func (s *UpdateSecurityGroupRuleDescriptionsIngressOutput) SetReturn(v bool) *Up } // Describes the S3 bucket for the disk image. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UserBucket type UserBucket struct { _ struct{} `type:"structure"` @@ -66071,7 +68158,6 @@ func (s *UserBucket) SetS3Key(v string) *UserBucket { } // Describes the S3 bucket for the disk image. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UserBucketDetails type UserBucketDetails struct { _ struct{} `type:"structure"` @@ -66105,7 +68191,6 @@ func (s *UserBucketDetails) SetS3Key(v string) *UserBucketDetails { } // Describes the user data for an instance. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UserData type UserData struct { _ struct{} `type:"structure"` @@ -66132,7 +68217,6 @@ func (s *UserData) SetData(v string) *UserData { } // Describes a security group and AWS account ID pair. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UserIdGroupPair type UserIdGroupPair struct { _ struct{} `type:"structure"` @@ -66149,13 +68233,19 @@ type UserIdGroupPair struct { // The name of the security group. In a request, use this parameter for a security // group in EC2-Classic or a default VPC only. For a security group in a nondefault // VPC, use the security group ID. + // + // For a referenced security group in another VPC, this value is not returned + // if the referenced security group is deleted. GroupName *string `locationName:"groupName" type:"string"` // The status of a VPC peering connection, if applicable. PeeringStatus *string `locationName:"peeringStatus" type:"string"` - // The ID of an AWS account. For a referenced security group in another VPC, - // the account ID of the referenced security group is returned. 
+ // The ID of an AWS account. + // + // For a referenced security group in another VPC, the account ID of the referenced + // security group is returned in the response. If the referenced security group + // is deleted, this value is not returned. // // [EC2-Classic] Required when adding or removing rules that reference a security // group in another AWS account. @@ -66221,7 +68311,6 @@ func (s *UserIdGroupPair) SetVpcPeeringConnectionId(v string) *UserIdGroupPair { } // Describes telemetry for a VPN tunnel. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VgwTelemetry type VgwTelemetry struct { _ struct{} `type:"structure"` @@ -66229,7 +68318,7 @@ type VgwTelemetry struct { AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` // The date and time of the last change in status. - LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"iso8601"` + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp"` // The Internet-routable IP address of the virtual private gateway's outside // interface. @@ -66283,7 +68372,6 @@ func (s *VgwTelemetry) SetStatusMessage(v string) *VgwTelemetry { } // Describes a volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Volume type Volume struct { _ struct{} `type:"structure"` @@ -66294,7 +68382,7 @@ type Volume struct { AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The time stamp when volume creation was initiated. - CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` // Indicates whether the volume will be encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` @@ -66303,11 +68391,12 @@ type Volume struct { // For Provisioned IOPS SSD volumes, this represents the number of IOPS that // are provisioned for the volume. For General Purpose SSD volumes, this represents // the baseline performance of the volume and the rate at which the volume accumulates - // I/O credits for bursting. For more information on General Purpose SSD baseline - // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // I/O credits for bursting. For more information about General Purpose SSD + // baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for + // Constraint: Range is 100-32000 IOPS for io1 volumes and 100-10000 IOPS for // gp2 volumes. // // Condition: This parameter is required for requests to create io1 volumes; @@ -66422,12 +68511,11 @@ func (s *Volume) SetVolumeType(v string) *Volume { } // Describes volume attachment details. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeAttachment type VolumeAttachment struct { _ struct{} `type:"structure"` // The time stamp when the attachment initiated. - AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` // Indicates whether the EBS volume is deleted on instance termination. 
DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` @@ -66492,7 +68580,6 @@ func (s *VolumeAttachment) SetVolumeId(v string) *VolumeAttachment { } // Describes an EBS volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeDetail type VolumeDetail struct { _ struct{} `type:"structure"` @@ -66534,45 +68621,44 @@ func (s *VolumeDetail) SetSize(v int64) *VolumeDetail { // Describes the modification status of an EBS volume. // // If the volume has never been modified, some element values will be null. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeModification type VolumeModification struct { _ struct{} `type:"structure"` - // Modification completion or failure time. - EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + // The modification completion or failure time. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // Current state of modification. Modification state is null for unmodified + // The current modification state. The modification state is null for unmodified // volumes. ModificationState *string `locationName:"modificationState" type:"string" enum:"VolumeModificationState"` - // Original IOPS rate of the volume being modified. + // The original IOPS rate of the volume. OriginalIops *int64 `locationName:"originalIops" type:"integer"` - // Original size of the volume being modified. + // The original size of the volume. OriginalSize *int64 `locationName:"originalSize" type:"integer"` - // Original EBS volume type of the volume being modified. + // The original EBS volume type of the volume. OriginalVolumeType *string `locationName:"originalVolumeType" type:"string" enum:"VolumeType"` - // Modification progress from 0 to 100%. + // The modification progress, from 0 to 100 percent complete. Progress *int64 `locationName:"progress" type:"long"` - // Modification start time - StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + // The modification start time. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` - // Generic status message on modification progress or failure. + // A status message about the modification progress or failure. StatusMessage *string `locationName:"statusMessage" type:"string"` - // Target IOPS rate of the volume being modified. + // The target IOPS rate of the volume. TargetIops *int64 `locationName:"targetIops" type:"integer"` - // Target size of the volume being modified. + // The target size of the volume, in GiB. TargetSize *int64 `locationName:"targetSize" type:"integer"` - // Target EBS volume type of the volume being modified. + // The target EBS volume type of the volume. TargetVolumeType *string `locationName:"targetVolumeType" type:"string" enum:"VolumeType"` - // ID of the volume being modified. + // The ID of the volume. VolumeId *string `locationName:"volumeId" type:"string"` } @@ -66659,7 +68745,6 @@ func (s *VolumeModification) SetVolumeId(v string) *VolumeModification { } // Describes a volume status operation code. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeStatusAction type VolumeStatusAction struct { _ struct{} `type:"structure"` @@ -66711,7 +68796,6 @@ func (s *VolumeStatusAction) SetEventType(v string) *VolumeStatusAction { } // Describes a volume status. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeStatusDetails type VolumeStatusDetails struct { _ struct{} `type:"structure"` @@ -66745,7 +68829,6 @@ func (s *VolumeStatusDetails) SetStatus(v string) *VolumeStatusDetails { } // Describes a volume status event. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeStatusEvent type VolumeStatusEvent struct { _ struct{} `type:"structure"` @@ -66759,10 +68842,10 @@ type VolumeStatusEvent struct { EventType *string `locationName:"eventType" type:"string"` // The latest end time of the event. - NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` // The earliest start time of the event. - NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` + NotBefore *time.Time `locationName:"notBefore" type:"timestamp"` } // String returns the string representation @@ -66806,7 +68889,6 @@ func (s *VolumeStatusEvent) SetNotBefore(v time.Time) *VolumeStatusEvent { } // Describes the status of a volume. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeStatusInfo type VolumeStatusInfo struct { _ struct{} `type:"structure"` @@ -66840,7 +68922,6 @@ func (s *VolumeStatusInfo) SetStatus(v string) *VolumeStatusInfo { } // Describes the volume status. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeStatusItem type VolumeStatusItem struct { _ struct{} `type:"structure"` @@ -66901,7 +68982,6 @@ func (s *VolumeStatusItem) SetVolumeStatus(v *VolumeStatusInfo) *VolumeStatusIte } // Describes a VPC. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Vpc type Vpc struct { _ struct{} `type:"structure"` @@ -66999,7 +69079,6 @@ func (s *Vpc) SetVpcId(v string) *Vpc { } // Describes an attachment between a virtual private gateway and a VPC. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcAttachment type VpcAttachment struct { _ struct{} `type:"structure"` @@ -67033,7 +69112,6 @@ func (s *VpcAttachment) SetVpcId(v string) *VpcAttachment { } // Describes an IPv4 CIDR block associated with a VPC. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcCidrBlockAssociation type VpcCidrBlockAssociation struct { _ struct{} `type:"structure"` @@ -67076,7 +69154,6 @@ func (s *VpcCidrBlockAssociation) SetCidrBlockState(v *VpcCidrBlockState) *VpcCi } // Describes the state of a CIDR block. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcCidrBlockState type VpcCidrBlockState struct { _ struct{} `type:"structure"` @@ -67110,7 +69187,6 @@ func (s *VpcCidrBlockState) SetStatusMessage(v string) *VpcCidrBlockState { } // Describes whether a VPC is enabled for ClassicLink. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcClassicLink type VpcClassicLink struct { _ struct{} `type:"structure"` @@ -67153,12 +69229,11 @@ func (s *VpcClassicLink) SetVpcId(v string) *VpcClassicLink { } // Describes a VPC endpoint. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcEndpoint type VpcEndpoint struct { _ struct{} `type:"structure"` // The date and time the VPC endpoint was created. - CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"` + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp"` // (Interface endpoint) The DNS entries for the endpoint. 
DnsEntries []*DnsEntry `locationName:"dnsEntrySet" locationNameList:"item" type:"list"` @@ -67288,12 +69363,11 @@ func (s *VpcEndpoint) SetVpcId(v string) *VpcEndpoint { } // Describes a VPC endpoint connection to a service. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcEndpointConnection type VpcEndpointConnection struct { _ struct{} `type:"structure"` // The date and time the VPC endpoint was created. - CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"` + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp"` // The ID of the service to which the endpoint is connected. ServiceId *string `locationName:"serviceId" type:"string"` @@ -67349,7 +69423,6 @@ func (s *VpcEndpointConnection) SetVpcEndpointState(v string) *VpcEndpointConnec } // Describes an IPv6 CIDR block associated with a VPC. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcIpv6CidrBlockAssociation type VpcIpv6CidrBlockAssociation struct { _ struct{} `type:"structure"` @@ -67392,7 +69465,6 @@ func (s *VpcIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *VpcCidrBlockState } // Describes a VPC peering connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcPeeringConnection type VpcPeeringConnection struct { _ struct{} `type:"structure"` @@ -67401,7 +69473,7 @@ type VpcPeeringConnection struct { AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` // The time that an unaccepted VPC peering connection will expire. - ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"` + ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp"` // Information about the requester VPC. CIDR block information is only returned // when describing an active VPC peering connection. @@ -67464,7 +69536,6 @@ func (s *VpcPeeringConnection) SetVpcPeeringConnectionId(v string) *VpcPeeringCo } // Describes the VPC peering connection options. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcPeeringConnectionOptionsDescription type VpcPeeringConnectionOptionsDescription struct { _ struct{} `type:"structure"` @@ -67510,7 +69581,6 @@ func (s *VpcPeeringConnectionOptionsDescription) SetAllowEgressFromLocalVpcToRem } // Describes the status of a VPC peering connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcPeeringConnectionStateReason type VpcPeeringConnectionStateReason struct { _ struct{} `type:"structure"` @@ -67544,7 +69614,6 @@ func (s *VpcPeeringConnectionStateReason) SetMessage(v string) *VpcPeeringConnec } // Describes a VPC in a VPC peering connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcPeeringConnectionVpcInfo type VpcPeeringConnectionVpcInfo struct { _ struct{} `type:"structure"` @@ -67624,7 +69693,6 @@ func (s *VpcPeeringConnectionVpcInfo) SetVpcId(v string) *VpcPeeringConnectionVp } // Describes a VPN connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnConnection type VpnConnection struct { _ struct{} `type:"structure"` @@ -67745,7 +69813,6 @@ func (s *VpnConnection) SetVpnGatewayId(v string) *VpnConnection { } // Describes VPN connection options. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnConnectionOptions type VpnConnectionOptions struct { _ struct{} `type:"structure"` @@ -67771,13 +69838,12 @@ func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions } // Describes VPN connection options. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnConnectionOptionsSpecification type VpnConnectionOptionsSpecification struct { _ struct{} `type:"structure"` // Indicate whether the VPN connection uses static routes only. If you are creating // a VPN connection for a device that does not support BGP, you must specify - // true. + // true. Use CreateVpnConnectionRoute to create a static route. // // Default: false StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` @@ -67809,7 +69875,6 @@ func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptio } // Describes a virtual private gateway. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnGateway type VpnGateway struct { _ struct{} `type:"structure"` @@ -67889,7 +69954,6 @@ func (s *VpnGateway) SetVpnGatewayId(v string) *VpnGateway { } // Describes a static route for a VPN connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnStaticRoute type VpnStaticRoute struct { _ struct{} `type:"structure"` @@ -67932,7 +69996,6 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute { } // The tunnel options for a VPN connection. -// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnTunnelOptionsSpecification type VpnTunnelOptionsSpecification struct { _ struct{} `type:"structure"` @@ -68209,6 +70272,28 @@ const ( DatafeedSubscriptionStateInactive = "Inactive" ) +const ( + // DefaultTargetCapacityTypeSpot is a DefaultTargetCapacityType enum value + DefaultTargetCapacityTypeSpot = "spot" + + // DefaultTargetCapacityTypeOnDemand is a DefaultTargetCapacityType enum value + DefaultTargetCapacityTypeOnDemand = "on-demand" +) + +const ( + // DeleteFleetErrorCodeFleetIdDoesNotExist is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeFleetIdDoesNotExist = "fleetIdDoesNotExist" + + // DeleteFleetErrorCodeFleetIdMalformed is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeFleetIdMalformed = "fleetIdMalformed" + + // DeleteFleetErrorCodeFleetNotInDeletableState is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeFleetNotInDeletableState = "fleetNotInDeletableState" + + // DeleteFleetErrorCodeUnexpectedError is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeUnexpectedError = "unexpectedError" +) + const ( // DeviceTypeEbs is a DeviceType enum value DeviceTypeEbs = "ebs" @@ -68310,6 +70395,70 @@ const ( ExportTaskStateCompleted = "completed" ) +const ( + // FleetActivityStatusError is a FleetActivityStatus enum value + FleetActivityStatusError = "error" + + // FleetActivityStatusPendingFulfillment is a FleetActivityStatus enum value + FleetActivityStatusPendingFulfillment = "pending-fulfillment" + + // FleetActivityStatusPendingTermination is a FleetActivityStatus enum value + FleetActivityStatusPendingTermination = "pending-termination" + + // FleetActivityStatusFulfilled is a FleetActivityStatus enum value + FleetActivityStatusFulfilled = "fulfilled" +) + +const ( + // FleetEventTypeInstanceChange is a FleetEventType enum value + FleetEventTypeInstanceChange = "instance-change" + + // FleetEventTypeFleetChange is a FleetEventType enum value + FleetEventTypeFleetChange = "fleet-change" + + // 
FleetEventTypeServiceError is a FleetEventType enum value + FleetEventTypeServiceError = "service-error" +) + +const ( + // FleetExcessCapacityTerminationPolicyNoTermination is a FleetExcessCapacityTerminationPolicy enum value + FleetExcessCapacityTerminationPolicyNoTermination = "no-termination" + + // FleetExcessCapacityTerminationPolicyTermination is a FleetExcessCapacityTerminationPolicy enum value + FleetExcessCapacityTerminationPolicyTermination = "termination" +) + +const ( + // FleetOnDemandAllocationStrategyLowestPrice is a FleetOnDemandAllocationStrategy enum value + FleetOnDemandAllocationStrategyLowestPrice = "lowest-price" + + // FleetOnDemandAllocationStrategyPrioritized is a FleetOnDemandAllocationStrategy enum value + FleetOnDemandAllocationStrategyPrioritized = "prioritized" +) + +const ( + // FleetStateCodeSubmitted is a FleetStateCode enum value + FleetStateCodeSubmitted = "submitted" + + // FleetStateCodeActive is a FleetStateCode enum value + FleetStateCodeActive = "active" + + // FleetStateCodeDeleted is a FleetStateCode enum value + FleetStateCodeDeleted = "deleted" + + // FleetStateCodeFailed is a FleetStateCode enum value + FleetStateCodeFailed = "failed" + + // FleetStateCodeDeletedRunning is a FleetStateCode enum value + FleetStateCodeDeletedRunning = "deleted-running" + + // FleetStateCodeDeletedTerminating is a FleetStateCode enum value + FleetStateCodeDeletedTerminating = "deleted-terminating" + + // FleetStateCodeModifying is a FleetStateCode enum value + FleetStateCodeModifying = "modifying" +) + const ( // FleetTypeRequest is a FleetType enum value FleetTypeRequest = "request" @@ -68652,6 +70801,60 @@ const ( // InstanceTypeR416xlarge is a InstanceType enum value InstanceTypeR416xlarge = "r4.16xlarge" + // InstanceTypeR5Large is a InstanceType enum value + InstanceTypeR5Large = "r5.large" + + // InstanceTypeR5Xlarge is a InstanceType enum value + InstanceTypeR5Xlarge = "r5.xlarge" + + // InstanceTypeR52xlarge is a InstanceType enum value + InstanceTypeR52xlarge = "r5.2xlarge" + + // InstanceTypeR54xlarge is a InstanceType enum value + InstanceTypeR54xlarge = "r5.4xlarge" + + // InstanceTypeR58xlarge is a InstanceType enum value + InstanceTypeR58xlarge = "r5.8xlarge" + + // InstanceTypeR512xlarge is a InstanceType enum value + InstanceTypeR512xlarge = "r5.12xlarge" + + // InstanceTypeR516xlarge is a InstanceType enum value + InstanceTypeR516xlarge = "r5.16xlarge" + + // InstanceTypeR524xlarge is a InstanceType enum value + InstanceTypeR524xlarge = "r5.24xlarge" + + // InstanceTypeR5Metal is a InstanceType enum value + InstanceTypeR5Metal = "r5.metal" + + // InstanceTypeR5dLarge is a InstanceType enum value + InstanceTypeR5dLarge = "r5d.large" + + // InstanceTypeR5dXlarge is a InstanceType enum value + InstanceTypeR5dXlarge = "r5d.xlarge" + + // InstanceTypeR5d2xlarge is a InstanceType enum value + InstanceTypeR5d2xlarge = "r5d.2xlarge" + + // InstanceTypeR5d4xlarge is a InstanceType enum value + InstanceTypeR5d4xlarge = "r5d.4xlarge" + + // InstanceTypeR5d8xlarge is a InstanceType enum value + InstanceTypeR5d8xlarge = "r5d.8xlarge" + + // InstanceTypeR5d12xlarge is a InstanceType enum value + InstanceTypeR5d12xlarge = "r5d.12xlarge" + + // InstanceTypeR5d16xlarge is a InstanceType enum value + InstanceTypeR5d16xlarge = "r5d.16xlarge" + + // InstanceTypeR5d24xlarge is a InstanceType enum value + InstanceTypeR5d24xlarge = "r5d.24xlarge" + + // InstanceTypeR5dMetal is a InstanceType enum value + InstanceTypeR5dMetal = "r5d.metal" + // InstanceTypeX116xlarge is 
a InstanceType enum value InstanceTypeX116xlarge = "x1.16xlarge" @@ -68706,6 +70909,9 @@ const ( // InstanceTypeI316xlarge is a InstanceType enum value InstanceTypeI316xlarge = "i3.16xlarge" + // InstanceTypeI3Metal is a InstanceType enum value + InstanceTypeI3Metal = "i3.metal" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" @@ -68766,6 +70972,24 @@ const ( // InstanceTypeC518xlarge is a InstanceType enum value InstanceTypeC518xlarge = "c5.18xlarge" + // InstanceTypeC5dLarge is a InstanceType enum value + InstanceTypeC5dLarge = "c5d.large" + + // InstanceTypeC5dXlarge is a InstanceType enum value + InstanceTypeC5dXlarge = "c5d.xlarge" + + // InstanceTypeC5d2xlarge is a InstanceType enum value + InstanceTypeC5d2xlarge = "c5d.2xlarge" + + // InstanceTypeC5d4xlarge is a InstanceType enum value + InstanceTypeC5d4xlarge = "c5d.4xlarge" + + // InstanceTypeC5d9xlarge is a InstanceType enum value + InstanceTypeC5d9xlarge = "c5d.9xlarge" + + // InstanceTypeC5d18xlarge is a InstanceType enum value + InstanceTypeC5d18xlarge = "c5d.18xlarge" + // InstanceTypeCc14xlarge is a InstanceType enum value InstanceTypeCc14xlarge = "cc1.4xlarge" @@ -68844,6 +71068,24 @@ const ( // InstanceTypeM524xlarge is a InstanceType enum value InstanceTypeM524xlarge = "m5.24xlarge" + // InstanceTypeM5dLarge is a InstanceType enum value + InstanceTypeM5dLarge = "m5d.large" + + // InstanceTypeM5dXlarge is a InstanceType enum value + InstanceTypeM5dXlarge = "m5d.xlarge" + + // InstanceTypeM5d2xlarge is a InstanceType enum value + InstanceTypeM5d2xlarge = "m5d.2xlarge" + + // InstanceTypeM5d4xlarge is a InstanceType enum value + InstanceTypeM5d4xlarge = "m5d.4xlarge" + + // InstanceTypeM5d12xlarge is a InstanceType enum value + InstanceTypeM5d12xlarge = "m5d.12xlarge" + + // InstanceTypeM5d24xlarge is a InstanceType enum value + InstanceTypeM5d24xlarge = "m5d.24xlarge" + // InstanceTypeH12xlarge is a InstanceType enum value InstanceTypeH12xlarge = "h1.2xlarge" @@ -68855,6 +71097,24 @@ const ( // InstanceTypeH116xlarge is a InstanceType enum value InstanceTypeH116xlarge = "h1.16xlarge" + + // InstanceTypeZ1dLarge is a InstanceType enum value + InstanceTypeZ1dLarge = "z1d.large" + + // InstanceTypeZ1dXlarge is a InstanceType enum value + InstanceTypeZ1dXlarge = "z1d.xlarge" + + // InstanceTypeZ1d2xlarge is a InstanceType enum value + InstanceTypeZ1d2xlarge = "z1d.2xlarge" + + // InstanceTypeZ1d3xlarge is a InstanceType enum value + InstanceTypeZ1d3xlarge = "z1d.3xlarge" + + // InstanceTypeZ1d6xlarge is a InstanceType enum value + InstanceTypeZ1d6xlarge = "z1d.6xlarge" + + // InstanceTypeZ1d12xlarge is a InstanceType enum value + InstanceTypeZ1d12xlarge = "z1d.12xlarge" ) const ( @@ -68913,6 +71173,14 @@ const ( ListingStatusClosed = "closed" ) +const ( + // LogDestinationTypeCloudWatchLogs is a LogDestinationType enum value + LogDestinationTypeCloudWatchLogs = "cloud-watch-logs" + + // LogDestinationTypeS3 is a LogDestinationType enum value + LogDestinationTypeS3 = "s3" +) + const ( // MarketTypeSpot is a MarketType enum value MarketTypeSpot = "spot" @@ -68989,6 +71257,9 @@ const ( // NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value NetworkInterfaceStatusAvailable = "available" + // NetworkInterfaceStatusAssociated is a NetworkInterfaceStatus enum value + NetworkInterfaceStatusAssociated = "associated" + // NetworkInterfaceStatusAttaching is a NetworkInterfaceStatus enum value NetworkInterfaceStatusAttaching = "attaching" @@ -69035,6 +71306,14 @@ const ( 
OfferingTypeValuesAllUpfront = "All Upfront" ) +const ( + // OnDemandAllocationStrategyLowestPrice is a OnDemandAllocationStrategy enum value + OnDemandAllocationStrategyLowestPrice = "lowestPrice" + + // OnDemandAllocationStrategyPrioritized is a OnDemandAllocationStrategy enum value + OnDemandAllocationStrategyPrioritized = "prioritized" +) + const ( // OperationTypeAdd is a OperationType enum value OperationTypeAdd = "add" @@ -69340,6 +71619,25 @@ const ( SnapshotStateError = "error" ) +const ( + // SpotAllocationStrategyLowestPrice is a SpotAllocationStrategy enum value + SpotAllocationStrategyLowestPrice = "lowest-price" + + // SpotAllocationStrategyDiversified is a SpotAllocationStrategy enum value + SpotAllocationStrategyDiversified = "diversified" +) + +const ( + // SpotInstanceInterruptionBehaviorHibernate is a SpotInstanceInterruptionBehavior enum value + SpotInstanceInterruptionBehaviorHibernate = "hibernate" + + // SpotInstanceInterruptionBehaviorStop is a SpotInstanceInterruptionBehavior enum value + SpotInstanceInterruptionBehaviorStop = "stop" + + // SpotInstanceInterruptionBehaviorTerminate is a SpotInstanceInterruptionBehavior enum value + SpotInstanceInterruptionBehaviorTerminate = "terminate" +) + const ( // SpotInstanceStateOpen is a SpotInstanceState enum value SpotInstanceStateOpen = "open" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index 36b69ff28..7b42719d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -5,11 +5,64 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) +type retryer struct { + client.DefaultRetryer +} + +func (d retryer) RetryRules(r *request.Request) time.Duration { + switch r.Operation.Name { + case opModifyNetworkInterfaceAttribute: + fallthrough + case opAssignPrivateIpAddresses: + return customRetryRule(r) + default: + return d.DefaultRetryer.RetryRules(r) + } +} + +func customRetryRule(r *request.Request) time.Duration { + retryTimes := []time.Duration{ + time.Second, + 3 * time.Second, + 5 * time.Second, + } + + count := r.RetryCount + if count >= len(retryTimes) { + count = len(retryTimes) - 1 + } + + minTime := int(retryTimes[count]) + return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime) +} + +func setCustomRetryer(c *client.Client) { + maxRetries := aws.IntValue(c.Config.MaxRetries) + if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + + c.Retryer = retryer{ + DefaultRetryer: client.DefaultRetryer{ + NumMaxRetries: maxRetries, + }, + } +} + func init() { + initClient = func(c *client.Client) { + if c.Config.Retryer == nil { + // Only override the retryer with a custom one if the config + // does not already contain a retryer + setCustomRetryer(c) + } + } initRequest = func(r *request.Request) { if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter r.Handlers.Build.PushFront(fillPresignedURL) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index 1ba51125e..c258e0e85 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -3,10 
+3,22 @@ // Package ec2 provides the client and types for making API // requests to Amazon Elastic Compute Cloud. // -// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity -// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your -// need to invest in hardware up front, so you can develop and deploy applications -// faster. +// Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing +// capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest +// in hardware up front, so you can develop and deploy applications faster. +// +// To learn more about Amazon EC2, Amazon EBS, and Amazon VPC, see the following +// resources: +// +// * Amazon EC2 product page (http://aws.amazon.com/ec2) +// +// * Amazon EC2 documentation (http://aws.amazon.com/documentation/ec2) +// +// * Amazon EBS product page (http://aws.amazon.com/ebs) +// +// * Amazon VPC product page (http://aws.amazon.com/vpc) +// +// * Amazon VPC documentation (http://aws.amazon.com/documentation/vpc) // // See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index ba4433d38..6acbc43fe 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -29,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "ec2" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "ec2" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "EC2" // ServiceID is a unique identifer of a specific service. ) // New creates a new instance of the EC2 client with a session. @@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 23f0a06db..6f89a796e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -14,7 +14,7 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -88,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) // in the IAM User Guide. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a -// maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRole last +// for one hour. 
However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: you cannot call @@ -121,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // the user to call AssumeRole on the ARN of the role in the other account. // If the user is in the same account as the role, then you can either attach // a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. // // Using MFA with AssumeRole // @@ -194,7 +208,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -247,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. The duration -// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). -// The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. 
You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot @@ -367,7 +390,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -438,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service APIs. // -// The credentials are valid for the duration that you specified when calling -// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to -// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: @@ -492,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // the information from these providers to get and use temporary security // credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). 
// This article discusses web identity federation and shows an example of // how to use web identity federation to get access to content in Amazon // S3. @@ -569,7 +601,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -681,7 +713,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -756,7 +788,7 @@ const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -925,7 +957,7 @@ const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1049,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken return out, req.Send() } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest type AssumeRoleInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. 
The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1241,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1295,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. 
For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1436,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -1548,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. 
DurationSeconds *int64 `min:"900" type:"integer"` @@ -1711,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -1804,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin // The identifiers for the temporary security credentials that the operation // returns. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser type AssumedRoleUser struct { _ struct{} `type:"structure"` @@ -1847,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { } // AWS credentials for API authentication. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials type Credentials struct { _ struct{} `type:"structure"` @@ -1859,7 +1908,7 @@ type Credentials struct { // The date on which the current credentials expire. // // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + Expiration *time.Time `type:"timestamp" required:"true"` // The secret access key that can be used to sign requests. // @@ -1906,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` @@ -1951,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -1976,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu } // Identifiers for the federated user that is associated with the credentials. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser type FederatedUser struct { _ struct{} `type:"structure"` @@ -2017,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2034,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string { // Contains the response to a successful GetCallerIdentity request, including // information about the entity making the request. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` @@ -2080,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest type GetFederationTokenInput struct { _ struct{} `type:"structure"` @@ -2189,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse type GetFederationTokenOutput struct { _ struct{} `type:"structure"` @@ -2242,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo return s } -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest type GetSessionTokenInput struct { _ struct{} `type:"structure"` @@ -2327,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse type GetSessionTokenOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 1ee5839e0..185c914d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -29,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "sts" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifer of a specific service. ) // New creates a new instance of the STS client with a session. 
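The sts/api.go documentation changes above replace the fixed 15-minute-to-1-hour wording with the role's configurable maximum session duration: AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity now default to a one-hour session, and DurationSeconds can request anything from 900 seconds up to the role's maximum session duration setting (1 to 12 hours). A minimal sketch of how a caller of this vendored SDK would exercise that parameter follows; it is illustrative only, not part of the patch, and the role ARN, session name, and two-hour duration are placeholder assumptions.

// Sketch only: calls STS AssumeRole with the DurationSeconds parameter
// documented in the hunks above and reads back the returned Credentials.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Uses the default credential/region resolution of the SDK.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder ARN
		RoleSessionName: aws.String("example-session"),                        // placeholder name
		DurationSeconds: aws.Int64(7200),                                      // 2 hours; must not exceed the role's max session duration
	})
	if err != nil {
		log.Fatal(err)
	}

	creds := out.Credentials
	fmt.Println("access key:", aws.StringValue(creds.AccessKeyId))
	fmt.Println("expires at:", aws.TimeValue(creds.Expiration))
}

As the updated documentation notes, a DurationSeconds value higher than the maximum session duration configured on the role causes the operation to fail, so callers relying on sessions longer than one hour need the role's maximum raised accordingly.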
@@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, diff --git a/vendor/github.com/bronze1man/goStrongswanVici/go.mod b/vendor/github.com/bronze1man/goStrongswanVici/go.mod deleted file mode 100644 index f106e8b14..000000000 --- a/vendor/github.com/bronze1man/goStrongswanVici/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/bronze1man/goStrongswanVici - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 -) diff --git a/vendor/github.com/bronze1man/goStrongswanVici/go.sum b/vendor/github.com/bronze1man/goStrongswanVici/go.sum deleted file mode 100644 index e03ee77d9..000000000 --- a/vendor/github.com/bronze1man/goStrongswanVici/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/vendor/github.com/containerd/cgroups/stats/v1/doc.go new file mode 100644 index 000000000..23f3cdd4b --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package v1 diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go new file mode 100644 index 000000000..6d2d41770 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go @@ -0,0 +1,6125 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/cgroups/stats/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metrics struct { + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metrics) Reset() { *m = Metrics{} } +func (*Metrics) ProtoMessage() {} +func (*Metrics) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{0} +} +func (m *Metrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metrics.Merge(m, src) +} +func (m *Metrics) XXX_Size() int { + return m.Size() +} +func (m *Metrics) XXX_DiscardUnknown() { + xxx_messageInfo_Metrics.DiscardUnknown(m) +} + +var xxx_messageInfo_Metrics proto.InternalMessageInfo + +type HugetlbStat struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } +func (*HugetlbStat) ProtoMessage() {} +func 
(*HugetlbStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{1} +} +func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HugetlbStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_HugetlbStat.Merge(m, src) +} +func (m *HugetlbStat) XXX_Size() int { + return m.Size() +} +func (m *HugetlbStat) XXX_DiscardUnknown() { + xxx_messageInfo_HugetlbStat.DiscardUnknown(m) +} + +var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo + +type PidsStat struct { + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PidsStat) Reset() { *m = PidsStat{} } +func (*PidsStat) ProtoMessage() {} +func (*PidsStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{2} +} +func (m *PidsStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PidsStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_PidsStat.Merge(m, src) +} +func (m *PidsStat) XXX_Size() int { + return m.Size() +} +func (m *PidsStat) XXX_DiscardUnknown() { + xxx_messageInfo_PidsStat.DiscardUnknown(m) +} + +var xxx_messageInfo_PidsStat proto.InternalMessageInfo + +type CPUStat struct { + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUStat) Reset() { *m = CPUStat{} } +func (*CPUStat) ProtoMessage() {} +func (*CPUStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{3} +} +func (m *CPUStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUStat.Merge(m, src) +} +func (m *CPUStat) XXX_Size() int { + return m.Size() +} +func (m *CPUStat) XXX_DiscardUnknown() { + xxx_messageInfo_CPUStat.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUStat proto.InternalMessageInfo + +type CPUUsage struct { + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" 
json:"per_cpu,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{4} +} +func (m *CPUUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUUsage.Merge(m, src) +} +func (m *CPUUsage) XXX_Size() int { + return m.Size() +} +func (m *CPUUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CPUUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUUsage proto.InternalMessageInfo + +type Throttle struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Throttle) Reset() { *m = Throttle{} } +func (*Throttle) ProtoMessage() {} +func (*Throttle) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{5} +} +func (m *Throttle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Throttle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Throttle.Merge(m, src) +} +func (m *Throttle) XXX_Size() int { + return m.Size() +} +func (m *Throttle) XXX_DiscardUnknown() { + xxx_messageInfo_Throttle.DiscardUnknown(m) +} + +var xxx_messageInfo_Throttle proto.InternalMessageInfo + +type MemoryStat struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 
`protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` + KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryStat) Reset() { *m = MemoryStat{} } +func (*MemoryStat) ProtoMessage() {} +func (*MemoryStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{6} +} +func (m *MemoryStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryStat) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryStat.Merge(m, src) +} +func (m *MemoryStat) XXX_Size() int { + return m.Size() +} +func (m *MemoryStat) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryStat.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryStat proto.InternalMessageInfo + +type MemoryEntry struct { + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } +func (*MemoryEntry) ProtoMessage() {} +func (*MemoryEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{7} +} +func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryEntry.Merge(m, src) +} +func (m *MemoryEntry) XXX_Size() int { + return m.Size() +} +func (m *MemoryEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo + +type MemoryOomControl struct { + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } +func (*MemoryOomControl) ProtoMessage() {} +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{8} +} +func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryOomControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryOomControl.Merge(m, src) +} +func (m *MemoryOomControl) XXX_Size() int { + return m.Size() +} +func (m *MemoryOomControl) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo + +type BlkIOStat struct { + IoServiceBytesRecursive []*BlkIOEntry 
`protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } +func (*BlkIOStat) ProtoMessage() {} +func (*BlkIOStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{9} +} +func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOStat.Merge(m, src) +} +func (m *BlkIOStat) XXX_Size() int { + return m.Size() +} +func (m *BlkIOStat) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOStat.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo + +type BlkIOEntry struct { + Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } +func (*BlkIOEntry) ProtoMessage() {} +func (*BlkIOEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{10} +} +func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOEntry.Merge(m, src) +} +func (m *BlkIOEntry) XXX_Size() int { + return m.Size() +} +func (m *BlkIOEntry) XXX_DiscardUnknown() { + 
xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo + +type RdmaStat struct { + Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` + Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaStat) Reset() { *m = RdmaStat{} } +func (*RdmaStat) ProtoMessage() {} +func (*RdmaStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{11} +} +func (m *RdmaStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaStat.Merge(m, src) +} +func (m *RdmaStat) XXX_Size() int { + return m.Size() +} +func (m *RdmaStat) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaStat.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaStat proto.InternalMessageInfo + +type RdmaEntry struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` + HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } +func (*RdmaEntry) ProtoMessage() {} +func (*RdmaEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{12} +} +func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaEntry.Merge(m, src) +} +func (m *RdmaEntry) XXX_Size() int { + return m.Size() +} +func (m *RdmaEntry) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo + +type NetworkStat struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 
`protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkStat) Reset() { *m = NetworkStat{} } +func (*NetworkStat) ProtoMessage() {} +func (*NetworkStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{13} +} +func (m *NetworkStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NetworkStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkStat.Merge(m, src) +} +func (m *NetworkStat) XXX_Size() int { + return m.Size() +} +func (m *NetworkStat) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkStat.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkStat proto.InternalMessageInfo + +// CgroupStats exports per-cgroup statistics. +type CgroupStats struct { + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{14} +} +func (m *CgroupStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CgroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_CgroupStats.Merge(m, src) +} +func (m *CgroupStats) XXX_Size() int { + return m.Size() +} +func (m *CgroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_CgroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_CgroupStats proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") + proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") + proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") + proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") + proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") + proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") + proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") + 
proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") + proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") + proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") + proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") + proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") + proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") + proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") + proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") +} + +func init() { + proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) +} + +var fileDescriptor_a17b2d87c332bfaa = []byte{ + // 1749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, + 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, + 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, + 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, + 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, + 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, + 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, + 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, + 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, + 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, + 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, + 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, + 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, + 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, + 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, + 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, + 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61, + 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, + 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, + 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, + 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, + 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, + 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, + 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, + 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, + 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, + 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, + 0x69, 0xf7, 0x3e, 0x85, 
0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, + 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, + 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, + 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, + 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0, + 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, + 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, + 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, + 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, + 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, + 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, + 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15, + 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, + 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, + 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, + 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, + 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, + 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, + 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, + 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, + 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, + 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, + 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, + 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, + 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, + 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, + 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, + 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, + 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, + 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, + 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, + 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 0xeb, 0x4a, 0xee, + 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, + 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, + 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, + 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, + 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 
0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, + 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, + 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, + 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, + 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, + 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, + 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, + 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, + 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, + 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, + 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, + 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, + 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, + 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, + 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, + 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, + 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, + 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, + 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, + 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, + 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, + 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, + 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, + 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, + 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, + 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, + 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, + 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, + 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, + 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, + 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, + 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, + 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, + 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, + 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, + 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, + 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 
0x71, 0xe0, 0xb5, 0x65, + 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, + 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, + 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, + 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, + 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, + 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, + 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, + 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, + 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, + 0x22, 0xc4, 0x11, 0x00, 0x00, +} + +func (m *Metrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemoryOomControl != nil { + { + size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CgroupStats != nil { + { + size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Network) > 0 { + for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Rdma != nil { + { + size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Blkio != nil { + { + size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Pids != nil { + { + size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Hugetlb) > 0 { + for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Pagesize) > 0 { + i -= len(m.Pagesize) + copy(dAtA[i:], m.Pagesize) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) + i-- + dAtA[i] = 0x22 + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x18 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PidsStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if m.Current != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CPUStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Throttling != nil { + { + size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CPUUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PerCPU) > 0 { + dAtA11 := 
make([]byte, len(m.PerCPU)*10) + var j10 int + for _, num := range m.PerCPU { + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x22 + } + if m.User != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + i-- + dAtA[i] = 0x18 + } + if m.Kernel != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) + i-- + dAtA[i] = 0x10 + } + if m.Total != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Throttle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ThrottledTime != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) + i-- + dAtA[i] = 0x18 + } + if m.ThrottledPeriods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + i-- + dAtA[i] = 0x10 + } + if m.Periods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.KernelTCP != nil { + { + size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.Kernel != nil { + { + size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.TotalUnevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.TotalActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.TotalInactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.TotalActiveAnon != 
0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if m.TotalInactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.TotalPgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.TotalPgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.TotalPgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.TotalPgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.TotalWriteback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.TotalDirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.TotalMappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.TotalRSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.TotalRSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.TotalCache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.HierarchicalSwapLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.HierarchicalMemoryLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.Unevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + i-- + dAtA[i] = 0x78 + } + if m.ActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + i-- + dAtA[i] = 0x70 + } + if m.InactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) + i-- + dAtA[i] = 0x68 + } + if m.ActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) + i-- + dAtA[i] = 0x60 + } + if m.InactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) + i-- + dAtA[i] = 0x58 + } + if m.PgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) + i-- + dAtA[i] = 0x50 + } + if m.PgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) + i-- + dAtA[i] = 0x48 + } + if m.PgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) + i-- + dAtA[i] = 0x40 + } + if m.PgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) + i-- + dAtA[i] = 0x38 + } + if m.Writeback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) + i-- + dAtA[i] = 0x30 + } + if m.Dirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) + i-- + dAtA[i] = 0x28 + } + if m.MappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + i-- + dAtA[i] = 0x20 + } + if m.RSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + i-- + dAtA[i] = 0x18 + } + if m.RSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) + i-- + dAtA[i] = 0x10 + } + if m.Cache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m 
*MemoryEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x20 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x18 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x10 + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OomKill != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) + i-- + dAtA[i] = 0x18 + } + if m.UnderOom != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) + i-- + dAtA[i] = 0x10 + } + if m.OomKillDisable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SectorsRecursive) > 0 { + for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.IoTimeRecursive) > 0 { + for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.IoMergedRecursive) > 0 { + for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { 
+ size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.IoQueuedRecursive) > 0 { + for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.IoServicedRecursive) > 0 { + for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.IoServiceBytesRecursive) > 0 { + for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x28 + } + if m.Minor != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) + i-- + dAtA[i] = 0x20 + } + if m.Major != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + i-- + dAtA[i] = 0x18 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x12 + } + if len(m.Op) > 0 { + i -= len(m.Op) + copy(dAtA[i:], m.Op) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RdmaStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Limit) > 0 { + for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x12 + } + } + if len(m.Current) > 0 { + for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.HcaObjects != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) + i-- + dAtA[i] = 0x18 + } + if m.HcaHandles != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) + i-- + dAtA[i] = 0x10 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + i-- + dAtA[i] = 0x48 + } + if m.TxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + i-- + dAtA[i] = 0x40 + } + if m.TxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + i-- + dAtA[i] = 0x38 + } + if m.TxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + i-- + dAtA[i] = 0x30 + } + if m.RxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + i-- + dAtA[i] = 0x28 + } + if m.RxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + i-- + dAtA[i] = 0x20 + } + if m.RxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + i-- + dAtA[i] = 0x18 + } + if m.RxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CgroupStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NrIoWait != 0 { + i = encodeVarintMetrics(dAtA, 
i, uint64(m.NrIoWait)) + i-- + dAtA[i] = 0x28 + } + if m.NrUninterruptible != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) + i-- + dAtA[i] = 0x20 + } + if m.NrStopped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) + i-- + dAtA[i] = 0x18 + } + if m.NrRunning != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) + i-- + dAtA[i] = 0x10 + } + if m.NrSleeping != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Metrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hugetlb) > 0 { + for _, e := range m.Hugetlb { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Pids != nil { + l = m.Pids.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Blkio != nil { + l = m.Blkio.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Rdma != nil { + l = m.Rdma.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.Network) > 0 { + for _, e := range m.Network { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.CgroupStats != nil { + l = m.CgroupStats.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.MemoryOomControl != nil { + l = m.MemoryOomControl.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HugetlbStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + l = len(m.Pagesize) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PidsStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Current != 0 { + n += 1 + sovMetrics(uint64(m.Current)) + } + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != nil { + l = m.Usage.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Throttling != nil { + l = m.Throttling.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovMetrics(uint64(m.Total)) + } + if m.Kernel != 0 { + n += 1 + sovMetrics(uint64(m.Kernel)) + } + if m.User != 0 { + n += 1 + sovMetrics(uint64(m.User)) + } + if len(m.PerCPU) > 0 { + l = 0 + for _, e := range m.PerCPU { + l += sovMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Throttle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Periods != 0 { + n += 1 + sovMetrics(uint64(m.Periods)) + } + if m.ThrottledPeriods 
!= 0 { + n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) + } + if m.ThrottledTime != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledTime)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cache != 0 { + n += 1 + sovMetrics(uint64(m.Cache)) + } + if m.RSS != 0 { + n += 1 + sovMetrics(uint64(m.RSS)) + } + if m.RSSHuge != 0 { + n += 1 + sovMetrics(uint64(m.RSSHuge)) + } + if m.MappedFile != 0 { + n += 1 + sovMetrics(uint64(m.MappedFile)) + } + if m.Dirty != 0 { + n += 1 + sovMetrics(uint64(m.Dirty)) + } + if m.Writeback != 0 { + n += 1 + sovMetrics(uint64(m.Writeback)) + } + if m.PgPgIn != 0 { + n += 1 + sovMetrics(uint64(m.PgPgIn)) + } + if m.PgPgOut != 0 { + n += 1 + sovMetrics(uint64(m.PgPgOut)) + } + if m.PgFault != 0 { + n += 1 + sovMetrics(uint64(m.PgFault)) + } + if m.PgMajFault != 0 { + n += 1 + sovMetrics(uint64(m.PgMajFault)) + } + if m.InactiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.InactiveAnon)) + } + if m.ActiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.ActiveAnon)) + } + if m.InactiveFile != 0 { + n += 1 + sovMetrics(uint64(m.InactiveFile)) + } + if m.ActiveFile != 0 { + n += 1 + sovMetrics(uint64(m.ActiveFile)) + } + if m.Unevictable != 0 { + n += 1 + sovMetrics(uint64(m.Unevictable)) + } + if m.HierarchicalMemoryLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) + } + if m.HierarchicalSwapLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) + } + if m.TotalCache != 0 { + n += 2 + sovMetrics(uint64(m.TotalCache)) + } + if m.TotalRSS != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSS)) + } + if m.TotalRSSHuge != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) + } + if m.TotalMappedFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalMappedFile)) + } + if m.TotalDirty != 0 { + n += 2 + sovMetrics(uint64(m.TotalDirty)) + } + if m.TotalWriteback != 0 { + n += 2 + sovMetrics(uint64(m.TotalWriteback)) + } + if m.TotalPgPgIn != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) + } + if m.TotalPgPgOut != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) + } + if m.TotalPgFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgFault)) + } + if m.TotalPgMajFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) + } + if m.TotalInactiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) + } + if m.TotalActiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) + } + if m.TotalInactiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) + } + if m.TotalActiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveFile)) + } + if m.TotalUnevictable != 0 { + n += 2 + sovMetrics(uint64(m.TotalUnevictable)) + } + if m.Usage != nil { + l = m.Usage.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Swap != nil { + l = m.Swap.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Kernel != nil { + l = m.Kernel.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.KernelTCP != nil { + l = m.KernelTCP.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryOomControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OomKillDisable != 0 { + n += 1 + sovMetrics(uint64(m.OomKillDisable)) + } + if m.UnderOom != 0 { + n += 1 + sovMetrics(uint64(m.UnderOom)) + } + if m.OomKill != 0 { + n += 1 + sovMetrics(uint64(m.OomKill)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.IoServiceBytesRecursive) > 0 { + for _, e := range m.IoServiceBytesRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServicedRecursive) > 0 { + for _, e := range m.IoServicedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoQueuedRecursive) > 0 { + for _, e := range m.IoQueuedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for _, e := range m.IoServiceTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for _, e := range m.IoWaitTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoMergedRecursive) > 0 { + for _, e := range m.IoMergedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoTimeRecursive) > 0 { + for _, e := range m.IoTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.SectorsRecursive) > 0 { + for _, e := range m.SectorsRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Op) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Major != 0 { + n += 1 + sovMetrics(uint64(m.Major)) + } + if m.Minor != 0 { + n += 1 + sovMetrics(uint64(m.Minor)) + } + if m.Value != 0 { + n += 1 + sovMetrics(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Current) > 0 { + for _, e := range m.Current { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Limit) > 0 { + for _, e := range m.Limit { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.HcaHandles != 0 { + n += 1 + sovMetrics(uint64(m.HcaHandles)) + } + if m.HcaObjects != 0 { + n += 1 + sovMetrics(uint64(m.HcaObjects)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *NetworkStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.RxBytes != 0 { + n += 1 + sovMetrics(uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + n += 1 + sovMetrics(uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + n += 1 + sovMetrics(uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + n += 1 + sovMetrics(uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + n += 1 + sovMetrics(uint64(m.TxBytes)) + } 
+ if m.TxPackets != 0 { + n += 1 + sovMetrics(uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + n += 1 + sovMetrics(uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + n += 1 + sovMetrics(uint64(m.TxDropped)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CgroupStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NrSleeping != 0 { + n += 1 + sovMetrics(uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + n += 1 + sovMetrics(uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + n += 1 + sovMetrics(uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + n += 1 + sovMetrics(uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + n += 1 + sovMetrics(uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForHugetlb := "[]*HugetlbStat{" + for _, f := range this.Hugetlb { + repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," + } + repeatedStringForHugetlb += "}" + repeatedStringForNetwork := "[]*NetworkStat{" + for _, f := range this.Network { + repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," + } + repeatedStringForNetwork += "}" + s := strings.Join([]string{`&Metrics{`, + `Hugetlb:` + repeatedStringForHugetlb + `,`, + `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, + `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, + `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, + `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + repeatedStringForNetwork + `,`, + `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, + `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *HugetlbStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HugetlbStat{`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *PidsStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsStat{`, + `Current:` + fmt.Sprintf("%v", this.Current) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUStat{`, + `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, + `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, 
+ `}`, + }, "") + return s +} +func (this *CPUUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUUsage{`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Throttle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Throttle{`, + `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, + `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, + `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryStat{`, + `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, + `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, + `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, + `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, + `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, + `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, + `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, + `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, + `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, + `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, + `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, + `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, + `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, + `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, + `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, + `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, + `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, + `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, + `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, + `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, + `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, + `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, + `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, + `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, + `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, + `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, + `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, + `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, + `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, + `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, + `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, + `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, + `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryEntry) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&MemoryEntry{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryOomControl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryOomControl{`, + `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, + `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, + `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceBytesRecursive { + repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceBytesRecursive += "}" + repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServicedRecursive { + repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServicedRecursive += "}" + repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoQueuedRecursive { + repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoQueuedRecursive += "}" + repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceTimeRecursive { + repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceTimeRecursive += "}" + repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoWaitTimeRecursive { + repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoWaitTimeRecursive += "}" + repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoMergedRecursive { + repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoMergedRecursive += "}" + repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoTimeRecursive { + repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoTimeRecursive += "}" + repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" + for _, f := range this.SectorsRecursive { + repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForSectorsRecursive += "}" + s := strings.Join([]string{`&BlkIOStat{`, + `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`, + `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, + `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, + `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, + `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, + `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, + `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, + `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, + 
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlkIOEntry{`, + `Op:` + fmt.Sprintf("%v", this.Op) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Major:` + fmt.Sprintf("%v", this.Major) + `,`, + `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrent := "[]*RdmaEntry{" + for _, f := range this.Current { + repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForCurrent += "}" + repeatedStringForLimit := "[]*RdmaEntry{" + for _, f := range this.Limit { + repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForLimit += "}" + s := strings.Join([]string{`&RdmaStat{`, + `Current:` + repeatedStringForCurrent + `,`, + `Limit:` + repeatedStringForLimit + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RdmaEntry{`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, + `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkStat{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, + `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, + `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, + `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, + `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, + `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, + `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, + `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupStats{`, + `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, + `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, + `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, + `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, + `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) + if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pids == nil { + m.Pids = &PidsStat{} + } + if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &CPUStat{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &MemoryStat{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blkio == nil { + m.Blkio = &BlkIOStat{} + } + if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rdma == nil { + m.Rdma = &RdmaStat{} + } + if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = append(m.Network, &NetworkStat{}) + if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupStats == nil { + m.CgroupStats = &CgroupStats{} + } + if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoryOomControl == nil { + m.MemoryOomControl = &MemoryOomControl{} + } + if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugetlbStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pagesize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &CPUUsage{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttling == nil { + m.Throttling = &Throttle{} + } + if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + m.Kernel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kernel |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + m.User = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.User |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PerCPU) == 0 { + m.PerCPU = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Throttle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Throttle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) + } + m.Periods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Periods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) + } + m.ThrottledPeriods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledPeriods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) + } + m.ThrottledTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + m.Cache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) + } + m.RSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) + } + m.RSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) + } + m.MappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) + } + m.Dirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) + } + m.Writeback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Writeback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) + } + m.PgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) + } + m.PgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) + } + m.PgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) + } + m.PgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) + } + m.InactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) + } + m.ActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) + } + m.InactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) + } + m.ActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) + } + m.Unevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) + } + m.HierarchicalMemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) + } + m.HierarchicalSwapLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) + } + m.TotalCache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalCache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) + } + m.TotalRSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) + } + m.TotalRSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) + } + m.TotalMappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalMappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) + } + m.TotalDirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) + } + m.TotalWriteback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalWriteback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) + } + m.TotalPgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) + } + m.TotalPgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) + } + m.TotalPgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.TotalPgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) + } + m.TotalPgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) + } + m.TotalInactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) + } + m.TotalActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) + } + m.TotalInactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) + } + m.TotalActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) + } + m.TotalUnevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalUnevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &MemoryEntry{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &MemoryEntry{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &MemoryEntry{} + } + if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTCP == nil { + m.KernelTCP = &MemoryEntry{} + } + if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) + } + m.OomKillDisable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKillDisable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) + } + m.UnderOom = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnderOom |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) + } + m.OomKill = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKill |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) + if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) + if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) + if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) + if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) + if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) + if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) + if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) + if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Op = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + m.Major = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Major |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + m.Minor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minor |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Current = append(m.Current, &RdmaEntry{}) + if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limit = append(m.Limit, &RdmaEntry{}) + if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) + } + m.HcaHandles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaHandles |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) + } + m.HcaObjects = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaObjects |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + m.RxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) + } + m.RxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + m.RxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) + } + m.RxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + m.TxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) + } + m.TxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + m.TxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) + } + m.TxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) + } + m.NrSleeping = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrSleeping |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) + } + m.NrRunning = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrRunning |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) + } + m.NrStopped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrStopped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) + } + m.NrUninterruptible = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrUninterruptible |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 
5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) + } + m.NrIoWait = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrIoWait |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt new file mode 100644 index 000000000..e476cea64 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt @@ -0,0 +1,790 @@ +file { + name: "github.com/containerd/cgroups/stats/v1/metrics.proto" + package: "io.containerd.cgroups.v1" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Metrics" + field { + name: "hugetlb" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.HugetlbStat" + json_name: "hugetlb" + } + field { + name: "pids" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.PidsStat" + json_name: "pids" + } + field { + name: "cpu" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUStat" + options { + 65004: "CPU" + } + json_name: "cpu" + } + field { + name: "memory" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryStat" + json_name: "memory" + } + field { + name: 
"blkio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOStat" + json_name: "blkio" + } + field { + name: "rdma" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaStat" + json_name: "rdma" + } + field { + name: "network" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.NetworkStat" + json_name: "network" + } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } + field { + name: "memory_oom_control" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryOomControl" + json_name: "memoryOomControl" + } + } + message_type { + name: "HugetlbStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + field { + name: "pagesize" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "pagesize" + } + } + message_type { + name: "PidsStat" + field { + name: "current" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + } + message_type { + name: "CPUStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUUsage" + json_name: "usage" + } + field { + name: "throttling" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.Throttle" + json_name: "throttling" + } + } + message_type { + name: "CPUUsage" + field { + name: "total" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "total" + } + field { + name: "kernel" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "kernel" + } + field { + name: "user" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "user" + } + field { + name: "per_cpu" + number: 4 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + 65004: "PerCPU" + } + json_name: "perCpu" + } + } + message_type { + name: "Throttle" + field { + name: "periods" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "periods" + } + field { + name: "throttled_periods" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledPeriods" + } + field { + name: "throttled_time" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledTime" + } + } + message_type { + name: "MemoryStat" + field { + name: "cache" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "cache" + } + field { + name: "rss" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSS" + } + json_name: "rss" + } + field { + name: "rss_huge" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSSHuge" + } + json_name: "rssHuge" + } + field { + name: "mapped_file" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "mappedFile" + } + field { + name: "dirty" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "dirty" + } + field { + name: "writeback" + number: 6 + label: 
LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "writeback" + } + field { + name: "pg_pg_in" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgIn" + } + field { + name: "pg_pg_out" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgOut" + } + field { + name: "pg_fault" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgFault" + } + field { + name: "pg_maj_fault" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgMajFault" + } + field { + name: "inactive_anon" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveAnon" + } + field { + name: "active_anon" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeAnon" + } + field { + name: "inactive_file" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveFile" + } + field { + name: "active_file" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeFile" + } + field { + name: "unevictable" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "unevictable" + } + field { + name: "hierarchical_memory_limit" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalMemoryLimit" + } + field { + name: "hierarchical_swap_limit" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalSwapLimit" + } + field { + name: "total_cache" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalCache" + } + field { + name: "total_rss" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSS" + } + json_name: "totalRss" + } + field { + name: "total_rss_huge" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSSHuge" + } + json_name: "totalRssHuge" + } + field { + name: "total_mapped_file" + number: 21 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalMappedFile" + } + field { + name: "total_dirty" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalDirty" + } + field { + name: "total_writeback" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalWriteback" + } + field { + name: "total_pg_pg_in" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgIn" + } + field { + name: "total_pg_pg_out" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgOut" + } + field { + name: "total_pg_fault" + number: 26 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgFault" + } + field { + name: "total_pg_maj_fault" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgMajFault" + } + field { + name: "total_inactive_anon" + number: 28 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveAnon" + } + field { + name: "total_active_anon" + number: 29 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveAnon" + } + field { + name: "total_inactive_file" + number: 30 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveFile" + } + field { + name: "total_active_file" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveFile" + } + field { + name: "total_unevictable" + number: 32 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalUnevictable" + } + field { + name: "usage" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "usage" + } + field 
{ + name: "swap" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "swap" + } + field { + name: "kernel" + number: 35 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "kernel" + } + field { + name: "kernel_tcp" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + options { + 65004: "KernelTCP" + } + json_name: "kernelTcp" + } + } + message_type { + name: "MemoryEntry" + field { + name: "limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + field { + name: "usage" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + } + message_type { + name: "MemoryOomControl" + field { + name: "oom_kill_disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKillDisable" + } + field { + name: "under_oom" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "underOom" + } + field { + name: "oom_kill" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKill" + } + } + message_type { + name: "BlkIOStat" + field { + name: "io_service_bytes_recursive" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceBytesRecursive" + } + field { + name: "io_serviced_recursive" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServicedRecursive" + } + field { + name: "io_queued_recursive" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioQueuedRecursive" + } + field { + name: "io_service_time_recursive" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceTimeRecursive" + } + field { + name: "io_wait_time_recursive" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioWaitTimeRecursive" + } + field { + name: "io_merged_recursive" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioMergedRecursive" + } + field { + name: "io_time_recursive" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioTimeRecursive" + } + field { + name: "sectors_recursive" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "sectorsRecursive" + } + } + message_type { + name: "BlkIOEntry" + field { + name: "op" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "op" + } + field { + name: "device" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "major" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "major" + } + field { + name: "minor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "minor" + } + field { + name: "value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + message_type { + name: 
"RdmaStat" + field { + name: "current" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "limit" + } + } + message_type { + name: "RdmaEntry" + field { + name: "device" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "hca_handles" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaHandles" + } + field { + name: "hca_objects" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaObjects" + } + } + message_type { + name: "NetworkStat" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "rx_bytes" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxBytes" + } + field { + name: "rx_packets" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxPackets" + } + field { + name: "rx_errors" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxErrors" + } + field { + name: "rx_dropped" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxDropped" + } + field { + name: "tx_bytes" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txBytes" + } + field { + name: "tx_packets" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txPackets" + } + field { + name: "tx_errors" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txErrors" + } + field { + name: "tx_dropped" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txDropped" + } + } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } + syntax: "proto3" +} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto new file mode 100644 index 000000000..b3f6cc37d --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package io.containerd.cgroups.v1; + +import "gogoproto/gogo.proto"; + +message Metrics { + repeated HugetlbStat hugetlb = 1; + PidsStat pids = 2; + CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; + MemoryStat memory = 4; + BlkIOStat blkio = 5; + RdmaStat rdma = 6; + repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; + MemoryOomControl memory_oom_control = 9; +} + +message HugetlbStat { + uint64 usage = 1; + uint64 max = 2; + uint64 failcnt = 3; + string pagesize = 4; +} + +message PidsStat { + uint64 current = 1; + uint64 limit = 2; +} + +message CPUStat { + CPUUsage usage = 1; + Throttle throttling = 2; +} + +message CPUUsage { + // values in nanoseconds + uint64 total = 1; + uint64 kernel = 2; + uint64 user = 3; + repeated uint64 per_cpu = 4 [(gogoproto.customname) = 
"PerCPU"]; + +} + +message Throttle { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message MemoryStat { + uint64 cache = 1; + uint64 rss = 2 [(gogoproto.customname) = "RSS"]; + uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; + uint64 mapped_file = 4; + uint64 dirty = 5; + uint64 writeback = 6; + uint64 pg_pg_in = 7; + uint64 pg_pg_out = 8; + uint64 pg_fault = 9; + uint64 pg_maj_fault = 10; + uint64 inactive_anon = 11; + uint64 active_anon = 12; + uint64 inactive_file = 13; + uint64 active_file = 14; + uint64 unevictable = 15; + uint64 hierarchical_memory_limit = 16; + uint64 hierarchical_swap_limit = 17; + uint64 total_cache = 18; + uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; + uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; + uint64 total_mapped_file = 21; + uint64 total_dirty = 22; + uint64 total_writeback = 23; + uint64 total_pg_pg_in = 24; + uint64 total_pg_pg_out = 25; + uint64 total_pg_fault = 26; + uint64 total_pg_maj_fault = 27; + uint64 total_inactive_anon = 28; + uint64 total_active_anon = 29; + uint64 total_inactive_file = 30; + uint64 total_active_file = 31; + uint64 total_unevictable = 32; + MemoryEntry usage = 33; + MemoryEntry swap = 34; + MemoryEntry kernel = 35; + MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; + +} + +message MemoryEntry { + uint64 limit = 1; + uint64 usage = 2; + uint64 max = 3; + uint64 failcnt = 4; +} + +message MemoryOomControl { + uint64 oom_kill_disable = 1; + uint64 under_oom = 2; + uint64 oom_kill = 3; +} + +message BlkIOStat { + repeated BlkIOEntry io_service_bytes_recursive = 1; + repeated BlkIOEntry io_serviced_recursive = 2; + repeated BlkIOEntry io_queued_recursive = 3; + repeated BlkIOEntry io_service_time_recursive = 4; + repeated BlkIOEntry io_wait_time_recursive = 5; + repeated BlkIOEntry io_merged_recursive = 6; + repeated BlkIOEntry io_time_recursive = 7; + repeated BlkIOEntry sectors_recursive = 8; +} + +message BlkIOEntry { + string op = 1; + string device = 2; + uint64 major = 3; + uint64 minor = 4; + uint64 value = 5; +} + +message RdmaStat { + repeated RdmaEntry current = 1; + repeated RdmaEntry limit = 2; +} + +message RdmaEntry { + string device = 1; + uint32 hca_handles = 2; + uint32 hca_objects = 3; +} + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} + +// CgroupStats exports per-cgroup statistics. +message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 216139c9c..000000000 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,1087 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package client - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 -) - -var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1819 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Time - _ = v0 - } -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, 
z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys3 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv4 := &x.Action - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "node": - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv9 := &x.Action - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - 
z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Dir != false - yyq2[6] = x.Expiration != nil - yyq2[7] = x.TTL != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Expiration == nil { - r.EncodeNil() - } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch 
yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv10 := &x.Nodes - yyv10.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv11 := &x.CreatedIndex - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) - } - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv13 := &x.ModifiedIndex - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) - } - } - case "expiration": - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { - r.DecodeBuiltin(yym17, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym16 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv18 := &x.TTL - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int64)(yyv18)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv21 := &x.Key - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv23 := &x.Dir - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv25 := &x.Value - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = 
yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv27 := &x.Nodes - yyv27.CodecDecodeSelf(d) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv28 := &x.CreatedIndex - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv30 := &x.ModifiedIndex - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym33 := z.DecBinary() - _ = yym33 - if false { - } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { - r.DecodeBuiltin(yym34, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym33 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj20-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyv1 == 
nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Node, yyrl1) - } - } else { - yyv1 = make([]*Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, nil) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw3 := yyv1[yyj1] - yyw3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 *Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw4 := yyv1[yyj1] - yyw4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index fdfa34359..000000000 --- a/vendor/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. 
-func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go deleted file mode 100644 index 4fcdb5ad9..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "log" - "math/big" - "net" - "os" - "path/filepath" - "time" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/tlsutil" -) - -func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { - if l, err = newListener(addr, scheme); err != nil { - return nil, err - } - return wrapTLS(addr, scheme, tlscfg, l) -} - -func newListener(addr string, scheme string) (net.Listener, error) { - if scheme == "unix" || scheme == "unixs" { - // unix sockets via unix://laddr - return NewUnixListener(addr) - } - return net.Listen("tcp", addr) -} - -func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { - if scheme != "https" && scheme != "unixs" { - return l, nil - } - if tlscfg == nil { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) - } - return tls.NewListener(l, tlscfg), nil -} - -type TLSInfo struct { - CertFile string - KeyFile string - CAFile string - TrustedCAFile string - ClientCertAuth bool - - // ServerName ensures the cert matches the given host in case of discovery / virtual hosting - ServerName string - - selfCert bool - - // parseFunc exists to simplify testing. Typically, parseFunc - // should be left nil. In that case, tls.X509KeyPair will be used. 
- parseFunc func([]byte, []byte) (tls.Certificate, error) -} - -func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth) -} - -func (info TLSInfo) Empty() bool { - return info.CertFile == "" && info.KeyFile == "" -} - -func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = fileutil.TouchDirAll(dirpath); err != nil { - return - } - - certPath := filepath.Join(dirpath, "cert.pem") - keyPath := filepath.Join(dirpath, "key.pem") - _, errcert := os.Stat(certPath) - _, errkey := os.Stat(keyPath) - if errcert == nil && errkey == nil { - info.CertFile = certPath - info.KeyFile = keyPath - info.selfCert = true - return - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return - } - - tmpl := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: []string{"etcd"}}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(365 * (24 * time.Hour)), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - for _, host := range hosts { - h, _, _ := net.SplitHostPort(host) - if ip := net.ParseIP(h); ip != nil { - tmpl.IPAddresses = append(tmpl.IPAddresses, ip) - } else { - tmpl.DNSNames = append(tmpl.DNSNames, h) - } - } - - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) - if err != nil { - return - } - - certOut, err := os.Create(certPath) - if err != nil { - return - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - b, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return - } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return - } - pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) - keyOut.Close() - - return SelfCert(dirpath, hosts) -} - -func (info TLSInfo) baseConfig() (*tls.Config, error) { - if info.KeyFile == "" || info.CertFile == "" { - return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) - } - - tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - if err != nil { - return nil, err - } - - cfg := &tls.Config{ - Certificates: []tls.Certificate{*tlsCert}, - MinVersion: tls.VersionTLS12, - ServerName: info.ServerName, - } - return cfg, nil -} - -// cafiles returns a list of CA file paths. -func (info TLSInfo) cafiles() []string { - cs := make([]string, 0) - if info.CAFile != "" { - cs = append(cs, info.CAFile) - } - if info.TrustedCAFile != "" { - cs = append(cs, info.TrustedCAFile) - } - return cs -} - -// ServerConfig generates a tls.Config object for use by an HTTP server. 
-func (info TLSInfo) ServerConfig() (*tls.Config, error) { - cfg, err := info.baseConfig() - if err != nil { - return nil, err - } - - cfg.ClientAuth = tls.NoClientCert - if info.CAFile != "" || info.ClientCertAuth { - cfg.ClientAuth = tls.RequireAndVerifyClientCert - } - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cp, err := tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - cfg.ClientCAs = cp - } - - // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server - cfg.NextProtos = []string{"h2"} - - return cfg, nil -} - -// ClientConfig generates a tls.Config object for use by an HTTP client. -func (info TLSInfo) ClientConfig() (*tls.Config, error) { - var cfg *tls.Config - var err error - - if !info.Empty() { - cfg, err = info.baseConfig() - if err != nil { - return nil, err - } - } else { - cfg = &tls.Config{ServerName: info.ServerName} - } - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - // if given a CA, trust any host with a cert signed by the CA - log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated") - cfg.ServerName = "" - } - - if info.selfCert { - cfg.InsecureSkipVerify = true - } - return cfg, nil -} - -// ShallowCopyTLSConfig copies *tls.Config. This is only -// work-around for go-vet tests, which complains -// -// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex -// -// Keep up-to-date with 'go/src/crypto/tls/common.go' -func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config { - ncfg := tls.Config{ - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } - return &ncfg -} diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go index 1074275b0..8d6f68906 100644 --- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go +++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go @@ -31,7 +31,6 @@ type Error struct { exec.ExitError cmd exec.Cmd msg string - proto Protocol exitStatus *int //for overriding } @@ -51,9 +50,8 @@ func (e *Error) IsNotExist() bool { if e.ExitStatus() != 1 { return false } - cmdIptables := getIptablesCommand(e.proto) - msgNoRuleExist := fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", cmdIptables) - msgNoChainExist := fmt.Sprintf("%s: No chain/target/match by that name.\n", cmdIptables) + msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" + msgNoChainExist := "No chain/target/match by that name.\n" return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) } @@ -75,6 +73,7 @@ type IPTables struct { v2 int v3 int mode string // the underlying iptables operating mode, e.g. nf_tables + timeout int // time to wait for the iptables lock, default waits forever } // Stat represents a structured statistic entry. 
@@ -91,19 +90,42 @@ type Stat struct { Options string `json:"options"` } -// New creates a new IPTables. -// For backwards compatibility, this always uses IPv4, i.e. "iptables". -func New() (*IPTables, error) { - return NewWithProtocol(ProtocolIPv4) +type option func(*IPTables) + +func IPFamily(proto Protocol) option { + return func(ipt *IPTables) { + ipt.proto = proto + } } -// New creates a new IPTables for the given proto. -// The proto will determine which command is used, either "iptables" or "ip6tables". -func NewWithProtocol(proto Protocol) (*IPTables, error) { - path, err := exec.LookPath(getIptablesCommand(proto)) +func Timeout(timeout int) option { + return func(ipt *IPTables) { + ipt.timeout = timeout + } +} + +// New creates a new IPTables configured with the options passed as parameter. +// For backwards compatibility, by default always uses IPv4 and timeout 0. +// i.e. you can create an IPv6 IPTables using a timeout of 5 seconds passing +// the IPFamily and Timeout options as follow: +// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5)) +func New(opts ...option) (*IPTables, error) { + + ipt := &IPTables{ + proto: ProtocolIPv4, + timeout: 0, + } + + for _, opt := range opts { + opt(ipt) + } + + path, err := exec.LookPath(getIptablesCommand(ipt.proto)) if err != nil { return nil, err } + ipt.path = path + vstring, err := getIptablesVersionString(path) if err != nil { return nil, fmt.Errorf("could not get iptables version: %v", err) @@ -112,21 +134,23 @@ func NewWithProtocol(proto Protocol) (*IPTables, error) { if err != nil { return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err) } + ipt.v1 = v1 + ipt.v2 = v2 + ipt.v3 = v3 + ipt.mode = mode checkPresent, waitPresent, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + ipt.hasCheck = checkPresent + ipt.hasWait = waitPresent + ipt.hasRandomFully = randomFullyPresent - ipt := IPTables{ - path: path, - proto: proto, - hasCheck: checkPresent, - hasWait: waitPresent, - hasRandomFully: randomFullyPresent, - v1: v1, - v2: v2, - v3: v3, - mode: mode, - } - return &ipt, nil + return ipt, nil +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". +func NewWithProtocol(proto Protocol) (*IPTables, error) { + return New(IPFamily(proto), Timeout(0)) } // Proto returns the protocol used by this IPTables. @@ -185,6 +209,14 @@ func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { return ipt.run(cmd...) } +func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err == nil && exists { + err = ipt.Delete(table, chain, rulespec...) 
+ } + return err +} + // List rules in specified table/chain func (ipt *IPTables) List(table, chain string) ([]string, error) { args := []string{"-t", table, "-S", chain} @@ -222,6 +254,21 @@ func (ipt *IPTables) ListChains(table string) ([]string, error) { return chains, nil } +// '-S' is fine with non existing rule index as long as the chain exists +// therefore pass index 1 to reduce overhead for large chains +func (ipt *IPTables) ChainExists(table, chain string) (bool, error) { + err := ipt.run("-t", table, "-S", chain, "1") + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + // Stats lists rules including the byte and packet counts func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} @@ -401,6 +448,18 @@ func (ipt *IPTables) DeleteChain(table, chain string) error { return ipt.run("-t", table, "-X", chain) } +func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error { + exists, err := ipt.ChainExists(table, chain) + if err != nil || !exists { + return err + } + err = ipt.run("-t", table, "-F", chain) + if err == nil { + err = ipt.run("-t", table, "-X", chain) + } + return err +} + // ChangePolicy changes policy on chain to target func (ipt *IPTables) ChangePolicy(table, chain, target string) error { return ipt.run("-t", table, "-P", chain, target) @@ -428,6 +487,9 @@ func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { args = append([]string{ipt.path}, args...) if ipt.hasWait { args = append(args, "--wait") + if ipt.timeout != 0 { + args = append(args, strconv.Itoa(ipt.timeout)) + } } else { fmu, err := newXtablesFileLock() if err != nil { @@ -452,7 +514,7 @@ func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { if err := cmd.Run(); err != nil { switch e := err.(type) { case *exec.ExitError: - return &Error{*e, cmd, stderr.String(), ipt.proto, nil} + return &Error{*e, cmd, stderr.String(), nil} default: return err } diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE similarity index 100% rename from vendor/github.com/coreos/etcd/LICENSE rename to vendor/github.com/coreos/go-semver/LICENSE diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 000000000..76cf4852c --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. 
+func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. + if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 000000000..e256b41a5 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
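The comparison rules of the semver package vendored above are easiest to follow with a concrete case; a minimal sketch (the version strings are made up for illustration):

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("1.2.3-alpha.1")     // panics on malformed input (wraps NewVersion)
	b, err := semver.NewVersion("1.2.3") // error-returning variant
	if err != nil {
		panic(err)
	}

	// A release always outranks an otherwise-equal pre-release,
	// so Compare returns -1 here and LessThan reports true.
	fmt.Println(a.Compare(*b))  // -1
	fmt.Println(a.LessThan(*b)) // true

	// BumpMinor resets Patch, PreRelease and Metadata.
	b.BumpMinor()
	fmt.Println(b) // 1.3.0
}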
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go index ef85a3ba2..a0f4837a0 100644 --- a/vendor/github.com/coreos/go-systemd/journal/journal.go +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -33,7 +33,10 @@ import ( "os" "strconv" "strings" + "sync" + "sync/atomic" "syscall" + "unsafe" ) // Priority of a journal message @@ -50,19 +53,35 @@ const ( PriDebug ) -var conn net.Conn +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) func init() { - var err error - conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") - if err != nil { - conn = nil - } + onceConn.Do(initConn) } -// Enabled returns true if the local systemd journal is available for logging +// Enabled checks whether the local systemd journal is available for logging. func Enabled() bool { - return conn != nil + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true } // Send a message to the local systemd journal. vars is a map of journald @@ -73,8 +92,14 @@ func Enabled() bool { // (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) // for more details. vars may be nil. 
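The Versions type and Sort helper added in sort.go above give a sort.Interface over version slices; a short sketch with made-up inputs:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// Hypothetical, unordered input versions.
	versions := []*semver.Version{
		semver.New("2.0.0"),
		semver.New("1.10.1"),
		semver.New("1.2.0-rc.1"),
		semver.New("1.2.0"),
	}

	// Sort orders the slice in place via Versions' Len/Swap/Less.
	semver.Sort(versions)

	for _, v := range versions {
		fmt.Println(v) // 1.2.0-rc.1, 1.2.0, 1.10.1, 2.0.0
	}
}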
func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) if conn == nil { - return journalError("could not connect to journald socket") + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", } data := new(bytes.Buffer) @@ -84,32 +109,30 @@ func Send(message string, priority Priority, vars map[string]string) error { appendVariable(data, k, v) } - _, err := io.Copy(conn, data) - if err != nil && isSocketSpaceError(err) { - file, err := tempFd() - if err != nil { - return journalError(err.Error()) - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return journalError(err.Error()) - } - - rights := syscall.UnixRights(int(file.Fd())) + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } - /* this connection should always be a UnixConn, but better safe than sorry */ - unixConn, ok := conn.(*net.UnixConn) - if !ok { - return journalError("can't send file through non-Unix connection") - } - _, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil) - if err != nil { - return journalError(err.Error()) - } - } else if err != nil { - return journalError(err.Error()) + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + return nil } @@ -119,8 +142,8 @@ func Print(priority Priority, format string, a ...interface{}) error { } func appendVariable(w io.Writer, name, value string) { - if !validVarName(name) { - journalError("variable name contains invalid character, ignoring") + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) } if strings.ContainsRune(value, '\n') { /* When the value contains a newline, we write: @@ -137,32 +160,42 @@ func appendVariable(w io.Writer, name, value string) { } } -func validVarName(name string) bool { - /* The variable name must be in uppercase and consist only of characters, - * numbers and underscores, and may not begin with an underscore. (from the docs) - */ +// validVarName validates a variable name to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } - valid := name[0] != '_' for _, c := range name { - valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } } - return valid + return nil } +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. 
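As a usage sketch for the reworked journal send path above: the import path is the vendored github.com/coreos/go-systemd/journal, while the field name and message below are purely illustrative.

package main

import (
	"log"

	"github.com/coreos/go-systemd/journal"
)

func main() {
	// Enabled reports whether the journald socket is reachable from this process.
	if !journal.Enabled() {
		log.Println("journald not available, falling back to stderr")
		return
	}

	// vars carries extra journald fields; names must be upper-case
	// alphanumerics/underscores and must not start with an underscore.
	vars := map[string]string{
		"EXAMPLE_COMPONENT": "demo",
	}

	if err := journal.Send("hello from the vendored journal package", journal.PriInfo, vars); err != nil {
		log.Printf("journal.Send failed: %v", err)
	}
}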
func isSocketSpaceError(err error) bool { opErr, ok := err.(*net.OpError) - if !ok { + if !ok || opErr == nil { return false } - sysErr, ok := opErr.Err.(syscall.Errno) - if !ok { + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { return false } - return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS } +// tempFd creates a temporary, unlinked file under `/dev/shm`. func tempFd() (*os.File, error) { file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") if err != nil { @@ -175,8 +208,18 @@ func tempFd() (*os.File, error) { return file, nil } -func journalError(s string) error { - s = "journal error: " + s - fmt.Fprintln(os.Stderr, s) - return errors.New(s) +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) } diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md index 81efb1fb6..f79dbfca5 100644 --- a/vendor/github.com/coreos/pkg/capnslog/README.md +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -21,7 +21,7 @@ Still the job of `main` to expose these configurations. `main` may delegate this Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. -Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependant. These are, at best, provided as options, but more likely, provided by your application. +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. ##### Log objects are an interface diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go index 44b8cd361..38ce6d261 100644 --- a/vendor/github.com/coreos/pkg/capnslog/init.go +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -32,7 +32,7 @@ import ( func init() { initHijack() - // Go `log` pacakge uses os.Stderr. + // Go `log` package uses os.Stderr. SetFormatter(NewDefaultFormatter(os.Stderr)) SetGlobalLogLevel(INFO) } diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go index 849544883..226b60c22 100644 --- a/vendor/github.com/coreos/pkg/capnslog/logmap.go +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -95,6 +95,11 @@ func (l *LogLevel) Set(s string) error { return nil } +// Returns an empty string, only here to fulfill the pflag.Value interface. +func (l *LogLevel) Type() string { + return "" +} + // ParseLevel translates some potential loglevel strings into their corresponding levels. 
func ParseLevel(s string) (LogLevel, error) { switch s { diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go index 612d55c66..00ff37149 100644 --- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -37,6 +37,14 @@ func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...inte } } +// SetLevel allows users to change the current logging level. +func (p *PackageLogger) SetLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + p.level = l +} + +// LevelAt checks if the given log level will be outputted under current setting. func (p *PackageLogger) LevelAt(l LogLevel) bool { logger.Lock() defer logger.Unlock() @@ -81,6 +89,12 @@ func (p *PackageLogger) Panic(args ...interface{}) { panic(s) } +func (p *PackageLogger) Panicln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + func (p *PackageLogger) Fatalf(format string, args ...interface{}) { p.Logf(CRITICAL, format, args...) os.Exit(1) diff --git a/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/denverdino/aliyungo/common/client.go index a59789fee..6c00d1c19 100644 --- a/vendor/github.com/denverdino/aliyungo/common/client.go +++ b/vendor/github.com/denverdino/aliyungo/common/client.go @@ -3,10 +3,14 @@ package common import ( "bytes" "encoding/json" + "errors" + "fmt" "io/ioutil" "log" "net/http" "net/url" + "os" + "strconv" "strings" "time" @@ -25,6 +29,7 @@ type UnderlineString string type Client struct { AccessKeyId string //Access Key Id AccessKeySecret string //Access Key Secret + securityToken string debug bool httpClient *http.Client endpoint string @@ -32,29 +37,73 @@ type Client struct { serviceCode string regionID Region businessInfo string - userAgent string + userAgent string } -// NewClient creates a new instance of ECS client +// Initialize properties of a client instance func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { client.AccessKeyId = accessKeyId - client.AccessKeySecret = accessKeySecret + "&" + ak := accessKeySecret + if !strings.HasSuffix(ak, "&") { + ak += "&" + } + client.AccessKeySecret = ak client.debug = false - client.httpClient = &http.Client{} + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 0 + } + if handshakeTimeout == 0 { + client.httpClient = &http.Client{} + } else { + t := &http.Transport{ + TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second} + client.httpClient = &http.Client{Transport: t} + } client.endpoint = endpoint client.version = version } +// Initialize properties of a client instance including regionID func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) { client.Init(endpoint, version, accessKeyId, accessKeySecret) client.serviceCode = serviceCode client.regionID = regionID - client.setEndpointByLocation(regionID, serviceCode, accessKeyId, accessKeySecret) +} + +// Intialize client object when all properties are ready +func (client *Client) InitClient() *Client { + client.debug = false + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 0 + } + if handshakeTimeout == 0 { + client.httpClient = &http.Client{} + } else { + t := &http.Transport{ + TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second} + 
client.httpClient = &http.Client{Transport: t} + } + return client +} + +func (client *Client) NewInitForAssumeRole(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region, securityToken string) { + client.NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode, regionID) + client.securityToken = securityToken +} + +//getLocationEndpoint +func (client *Client) getEndpointByLocation() string { + locationClient := NewLocationClient(client.AccessKeyId, client.AccessKeySecret, client.securityToken) + locationClient.SetDebug(true) + return locationClient.DescribeOpenAPIEndpoint(client.regionID, client.serviceCode) } //NewClient using location service -func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret string) { - locationClient := NewLocationClient(accessKeyId, accessKeySecret) +func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret, securityToken string) { + locationClient := NewLocationClient(accessKeyId, accessKeySecret, securityToken) + locationClient.SetDebug(true) ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode) if ep == "" { ep = loadEndpointFromFile(region, serviceCode) @@ -65,6 +114,95 @@ func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKe } } +// Ensure all necessary properties are valid +func (client *Client) ensureProperties() error { + var msg string + + if client.endpoint == "" { + msg = fmt.Sprintf("endpoint cannot be empty!") + } else if client.version == "" { + msg = fmt.Sprintf("version cannot be empty!") + } else if client.AccessKeyId == "" { + msg = fmt.Sprintf("AccessKeyId cannot be empty!") + } else if client.AccessKeySecret == "" { + msg = fmt.Sprintf("AccessKeySecret cannot be empty!") + } + + if msg != "" { + return errors.New(msg) + } + + return nil +} + +// ---------------------------------------------------- +// WithXXX methods +// ---------------------------------------------------- + +// WithEndpoint sets custom endpoint +func (client *Client) WithEndpoint(endpoint string) *Client { + client.SetEndpoint(endpoint) + return client +} + +// WithVersion sets custom version +func (client *Client) WithVersion(version string) *Client { + client.SetVersion(version) + return client +} + +// WithRegionID sets Region ID +func (client *Client) WithRegionID(regionID Region) *Client { + client.SetRegionID(regionID) + return client +} + +//WithServiceCode sets serviceCode +func (client *Client) WithServiceCode(serviceCode string) *Client { + client.SetServiceCode(serviceCode) + return client +} + +// WithAccessKeyId sets new AccessKeyId +func (client *Client) WithAccessKeyId(id string) *Client { + client.SetAccessKeyId(id) + return client +} + +// WithAccessKeySecret sets new AccessKeySecret +func (client *Client) WithAccessKeySecret(secret string) *Client { + client.SetAccessKeySecret(secret) + return client +} + +// WithSecurityToken sets securityToken +func (client *Client) WithSecurityToken(securityToken string) *Client { + client.SetSecurityToken(securityToken) + return client +} + +// WithDebug sets debug mode to log the request/response message +func (client *Client) WithDebug(debug bool) *Client { + client.SetDebug(debug) + return client +} + +// WithBusinessInfo sets business info to log the request/response message +func (client *Client) WithBusinessInfo(businessInfo string) *Client { + client.SetBusinessInfo(businessInfo) + return client +} + +// WithUserAgent sets user agent to the 
request/response message +func (client *Client) WithUserAgent(userAgent string) *Client { + client.SetUserAgent(userAgent) + return client +} + +// ---------------------------------------------------- +// SetXXX methods +// ---------------------------------------------------- + // SetEndpoint sets custom endpoint func (client *Client) SetEndpoint(endpoint string) { client.endpoint = endpoint @@ -75,6 +213,7 @@ func (client *Client) SetVersion(version string) { client.version = version } +// SetEndpoint sets Region ID func (client *Client) SetRegionID(regionID Region) { client.regionID = regionID } @@ -113,11 +252,41 @@ func (client *Client) SetUserAgent(userAgent string) { client.userAgent = userAgent } +//set SecurityToken +func (client *Client) SetSecurityToken(securityToken string) { + client.securityToken = securityToken +} + +func (client *Client) initEndpoint() error { + // if set any value to "CUSTOMIZED_ENDPOINT" could skip location service. + // example: export CUSTOMIZED_ENDPOINT=true + if os.Getenv("CUSTOMIZED_ENDPOINT") != "" { + return nil + } + + if client.serviceCode != "" && client.regionID != "" { + endpoint := client.getEndpointByLocation() + if endpoint == "" { + return GetCustomError("InvalidEndpoint", "endpoint is empty,pls check") + } + client.endpoint = endpoint + } + return nil +} + // Invoke sends the raw HTTP request for ECS services func (client *Client) Invoke(action string, args interface{}, response interface{}) error { + if err := client.ensureProperties(); err != nil { + return err + } + + //init endpoint + if err := client.initEndpoint(); err != nil { + return err + } request := Request{} - request.init(client.version, action, client.AccessKeyId) + request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID) query := util.ConvertToQueryValues(request) util.SetQueryValues(args, &query) @@ -137,7 +306,7 @@ func (client *Client) Invoke(action string, args interface{}, response interface // TODO move to util and add build val flag httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) - httpReq.Header.Set("User-Agent", httpReq.UserAgent()+ " " +client.userAgent) + httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent) t0 := time.Now() httpResp, err := client.httpClient.Do(httpReq) @@ -185,9 +354,17 @@ func (client *Client) Invoke(action string, args interface{}, response interface // Invoke sends the raw HTTP request for ECS services func (client *Client) InvokeByFlattenMethod(action string, args interface{}, response interface{}) error { + if err := client.ensureProperties(); err != nil { + return err + } + + //init endpoint + if err := client.initEndpoint(); err != nil { + return err + } request := Request{} - request.init(client.version, action, client.AccessKeyId) + request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID) query := util.ConvertToQueryValues(request) @@ -208,7 +385,7 @@ func (client *Client) InvokeByFlattenMethod(action string, args interface{}, res // TODO move to util and add build val flag httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) - httpReq.Header.Set("User-Agent", httpReq.UserAgent()+ " " +client.userAgent) + httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent) t0 := time.Now() httpResp, err := client.httpClient.Do(httpReq) @@ -258,10 +435,17 @@ func (client *Client) InvokeByFlattenMethod(action string, args interface{}, res //改进了一下上面那个方法,可以使用各种Http方法 //2017.1.30 
增加了一个path参数,用来拓展访问的地址 func (client *Client) InvokeByAnyMethod(method, action, path string, args interface{}, response interface{}) error { + if err := client.ensureProperties(); err != nil { + return err + } - request := Request{} - request.init(client.version, action, client.AccessKeyId) + //init endpoint + if err := client.initEndpoint(); err != nil { + return err + } + request := Request{} + request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID) data := util.ConvertToQueryValues(request) util.SetQueryValues(args, &data) @@ -290,8 +474,7 @@ func (client *Client) InvokeByAnyMethod(method, action, path string, args interf // TODO move to util and add build val flag httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) - - httpReq.Header.Set("User-Agent", httpReq.Header.Get("User-Agent")+ " " +client.userAgent) + httpReq.Header.Set("User-Agent", httpReq.Header.Get("User-Agent")+" "+client.userAgent) t0 := time.Now() httpResp, err := client.httpClient.Do(httpReq) @@ -355,3 +538,13 @@ func GetClientErrorFromString(str string) error { func GetClientError(err error) error { return GetClientErrorFromString(err.Error()) } + +func GetCustomError(code, message string) error { + return &Error{ + ErrorResponse: ErrorResponse{ + Code: code, + Message: message, + }, + StatusCode: 400, + } +} diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoint.go b/vendor/github.com/denverdino/aliyungo/common/endpoint.go index 16bcbf9d6..757f7a784 100644 --- a/vendor/github.com/denverdino/aliyungo/common/endpoint.go +++ b/vendor/github.com/denverdino/aliyungo/common/endpoint.go @@ -4,8 +4,10 @@ import ( "encoding/xml" "fmt" "io/ioutil" + "log" "os" "strings" + "time" ) const ( @@ -18,6 +20,57 @@ const ( var ( endpoints = make(map[Region]map[string]string) + + SpecailEnpoints = map[Region]map[string]string{ + APNorthEast1: { + "ecs": "https://ecs.ap-northeast-1.aliyuncs.com", + "slb": "https://slb.ap-northeast-1.aliyuncs.com", + "rds": "https://rds.ap-northeast-1.aliyuncs.com", + "vpc": "https://vpc.ap-northeast-1.aliyuncs.com", + }, + APSouthEast2: { + "ecs": "https://ecs.ap-southeast-2.aliyuncs.com", + "slb": "https://slb.ap-southeast-2.aliyuncs.com", + "rds": "https://rds.ap-southeast-2.aliyuncs.com", + "vpc": "https://vpc.ap-southeast-2.aliyuncs.com", + }, + APSouthEast3: { + "ecs": "https://ecs.ap-southeast-3.aliyuncs.com", + "slb": "https://slb.ap-southeast-3.aliyuncs.com", + "rds": "https://rds.ap-southeast-3.aliyuncs.com", + "vpc": "https://vpc.ap-southeast-3.aliyuncs.com", + }, + MEEast1: { + "ecs": "https://ecs.me-east-1.aliyuncs.com", + "slb": "https://slb.me-east-1.aliyuncs.com", + "rds": "https://rds.me-east-1.aliyuncs.com", + "vpc": "https://vpc.me-east-1.aliyuncs.com", + }, + EUCentral1: { + "ecs": "https://ecs.eu-central-1.aliyuncs.com", + "slb": "https://slb.eu-central-1.aliyuncs.com", + "rds": "https://rds.eu-central-1.aliyuncs.com", + "vpc": "https://vpc.eu-central-1.aliyuncs.com", + }, + EUWest1: { + "ecs": "https://ecs.eu-west-1.aliyuncs.com", + "slb": "https://slb.eu-west-1.aliyuncs.com", + "rds": "https://rds.eu-west-1.aliyuncs.com", + "vpc": "https://vpc.eu-west-1.aliyuncs.com", + }, + Zhangjiakou: { + "ecs": "https://ecs.cn-zhangjiakou.aliyuncs.com", + "slb": "https://slb.cn-zhangjiakou.aliyuncs.com", + "rds": "https://rds.cn-zhangjiakou.aliyuncs.com", + "vpc": "https://vpc.cn-zhangjiakou.aliyuncs.com", + }, + Huhehaote: { + "ecs": "https://ecs.cn-huhehaote.aliyuncs.com", + "slb": 
"https://slb.cn-huhehaote.aliyuncs.com", + "rds": "https://rds.cn-huhehaote.aliyuncs.com", + "vpc": "https://vpc.cn-huhehaote.aliyuncs.com", + }, + } ) //init endpoints from file @@ -25,18 +78,39 @@ func init() { } -func NewLocationClient(accessKeyId, accessKeySecret string) *Client { +type LocationClient struct { + Client +} + +func NewLocationClient(accessKeyId, accessKeySecret, securityToken string) *LocationClient { endpoint := os.Getenv("LOCATION_ENDPOINT") if endpoint == "" { endpoint = locationDefaultEndpoint } - client := &Client{} + client := &LocationClient{} client.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret) + client.securityToken = securityToken return client } -func (client *Client) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) { +func NewLocationClientWithSecurityToken(accessKeyId, accessKeySecret, securityToken string) *LocationClient { + endpoint := os.Getenv("LOCATION_ENDPOINT") + if endpoint == "" { + endpoint = locationDefaultEndpoint + } + + client := &LocationClient{} + client.WithEndpoint(endpoint). + WithVersion(locationAPIVersion). + WithAccessKeyId(accessKeyId). + WithAccessKeySecret(accessKeySecret). + WithSecurityToken(securityToken). + InitClient() + return client +} + +func (client *LocationClient) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) { response := &DescribeEndpointResponse{} err := client.Invoke("DescribeEndpoint", args, response) if err != nil { @@ -45,6 +119,15 @@ func (client *Client) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEnd return response, err } +func (client *LocationClient) DescribeEndpoints(args *DescribeEndpointsArgs) (*DescribeEndpointsResponse, error) { + response := &DescribeEndpointsResponse{} + err := client.Invoke("DescribeEndpoints", args, response) + if err != nil { + return nil, err + } + return response, err +} + func getProductRegionEndpoint(region Region, serviceCode string) string { if sp, ok := endpoints[region]; ok { if endpoint, ok := sp[serviceCode]; ok { @@ -61,32 +144,41 @@ func setProductRegionEndpoint(region Region, serviceCode string, endpoint string } } -func (client *Client) DescribeOpenAPIEndpoint(region Region, serviceCode string) string { +func (client *LocationClient) DescribeOpenAPIEndpoint(region Region, serviceCode string) string { if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" { return endpoint } - defaultProtocols := HTTP_PROTOCOL - args := &DescribeEndpointArgs{ + args := &DescribeEndpointsArgs{ Id: region, ServiceCode: serviceCode, Type: "openAPI", } - endpoint, err := client.DescribeEndpoint(args) - if err != nil || endpoint.Endpoint == "" { + var endpoint *DescribeEndpointsResponse + var err error + for index := 0; index < 5; index++ { + endpoint, err = client.DescribeEndpoints(args) + if err == nil && endpoint != nil && len(endpoint.Endpoints.Endpoint) > 0 { + break + } + time.Sleep(500 * time.Millisecond) + } + + if err != nil || endpoint == nil || len(endpoint.Endpoints.Endpoint) <= 0 { + log.Printf("aliyungo: can not get endpoint from service, use default. 
endpoint=[%v], error=[%v]\n", endpoint, err) return "" } - for _, protocol := range endpoint.Protocols.Protocols { + for _, protocol := range endpoint.Endpoints.Endpoint[0].Protocols.Protocols { if strings.ToLower(protocol) == HTTPS_PROTOCOL { defaultProtocols = HTTPS_PROTOCOL break } } - ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoint) + ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoints.Endpoint[0].Endpoint) setProductRegionEndpoint(region, serviceCode, ep) return ep @@ -97,13 +189,11 @@ func loadEndpointFromFile(region Region, serviceCode string) string { if err != nil { return "" } - var endpoints Endpoints err = xml.Unmarshal(data, &endpoints) if err != nil { return "" } - for _, endpoint := range endpoints.Endpoint { if endpoint.RegionIds.RegionId == string(region) { for _, product := range endpoint.Products.Product { diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml index 4079bcd2b..26ea765b2 100644 --- a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml +++ b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml @@ -1336,6 +1336,17 @@ Slbslb.eu-central-1.aliyuncs.com + + eu-west-1 + + Rdsrds.eu-west-1.aliyuncs.com + Ecsecs.eu-west-1.aliyuncs.com + Vpcvpc.eu-west-1.aliyuncs.com + Kmskms.eu-west-1.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.eu-west-1.aliyuncs.com + + cn-zhangjiakou @@ -1346,4 +1357,14 @@ Slbslb.cn-zhangjiakou.aliyuncs.com + + cn-huhehaote + + Rdsrds.cn-huhehaote.aliyuncs.com + Ecsecs.cn-huhehaote.aliyuncs.com + Vpcvpc.cn-huhehaote.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.cn-huhehaote.aliyuncs.com + + diff --git a/vendor/github.com/denverdino/aliyungo/common/regions.go b/vendor/github.com/denverdino/aliyungo/common/regions.go index 62e6e9d81..e6bc728fe 100644 --- a/vendor/github.com/denverdino/aliyungo/common/regions.go +++ b/vendor/github.com/denverdino/aliyungo/common/regions.go @@ -12,10 +12,15 @@ const ( Shenzhen = Region("cn-shenzhen") Shanghai = Region("cn-shanghai") Zhangjiakou = Region("cn-zhangjiakou") + Huhehaote = Region("cn-huhehaote") APSouthEast1 = Region("ap-southeast-1") APNorthEast1 = Region("ap-northeast-1") APSouthEast2 = Region("ap-southeast-2") + APSouthEast3 = Region("ap-southeast-3") + APSouthEast5 = Region("ap-southeast-5") + + APSouth1 = Region("ap-south-1") USWest1 = Region("us-west-1") USEast1 = Region("us-east-1") @@ -23,12 +28,28 @@ const ( MEEast1 = Region("me-east-1") EUCentral1 = Region("eu-central-1") + EUWest1 = Region("eu-west-1") + + ShenZhenFinance = Region("cn-shenzhen-finance-1") + ShanghaiFinance = Region("cn-shanghai-finance-1") ) var ValidRegions = []Region{ - Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, Zhangjiakou, + Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, Zhangjiakou, Huhehaote, USWest1, USEast1, - APNorthEast1, APSouthEast1, APSouthEast2, + APNorthEast1, APSouthEast1, APSouthEast2, APSouthEast3, APSouthEast5, + APSouth1, MEEast1, - EUCentral1, + EUCentral1, EUWest1, + ShenZhenFinance, ShanghaiFinance, +} + +// IsValidRegion checks if r is an Ali supported region. 
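A small sketch of how the expanded region table above might be consulted by SDK consumers, assuming the vendored github.com/denverdino/aliyungo/common package; the region strings are examples only.

package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/common"
)

func main() {
	// IsValidRegion walks common.ValidRegions, which now includes the
	// newly added entries such as eu-west-1 and cn-huhehaote.
	for _, r := range []string{"eu-west-1", "cn-huhehaote", "mars-north-1"} {
		fmt.Printf("%s valid: %v\n", r, common.IsValidRegion(r))
	}

	// Region constants can also be compared directly, e.g. common.EUWest1.
	fmt.Println(common.EUWest1 == common.Region("eu-west-1")) // true
}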
+func IsValidRegion(r string) bool { + for _, v := range ValidRegions { + if r == string(v) { + return true + } + } + return false } diff --git a/vendor/github.com/denverdino/aliyungo/common/request.go b/vendor/github.com/denverdino/aliyungo/common/request.go index 2a883f19b..f35c2990d 100644 --- a/vendor/github.com/denverdino/aliyungo/common/request.go +++ b/vendor/github.com/denverdino/aliyungo/common/request.go @@ -20,7 +20,9 @@ const ( type Request struct { Format string Version string + RegionId Region AccessKeyId string + SecurityToken string Signature string SignatureMethod string Timestamp util.ISO6801Time @@ -30,7 +32,7 @@ type Request struct { Action string } -func (request *Request) init(version string, action string, AccessKeyId string) { +func (request *Request) init(version string, action string, AccessKeyId string, securityToken string, regionId Region) { request.Format = JSONResponseFormat request.Timestamp = util.NewISO6801Time(time.Now().UTC()) request.Version = version @@ -39,6 +41,8 @@ func (request *Request) init(version string, action string, AccessKeyId string) request.SignatureNonce = util.CreateRandomString() request.Action = action request.AccessKeyId = AccessKeyId + request.SecurityToken = securityToken + request.RegionId = regionId } type Response struct { diff --git a/vendor/github.com/denverdino/aliyungo/common/types.go b/vendor/github.com/denverdino/aliyungo/common/types.go index a74e150e9..cf161f11b 100644 --- a/vendor/github.com/denverdino/aliyungo/common/types.go +++ b/vendor/github.com/denverdino/aliyungo/common/types.go @@ -36,6 +36,23 @@ type DescribeEndpointResponse struct { EndpointItem } +type DescribeEndpointsArgs struct { + Id Region + ServiceCode string + Type string +} + +type DescribeEndpointsResponse struct { + Response + Endpoints APIEndpoints + RequestId string + Success bool +} + +type APIEndpoints struct { + Endpoint []EndpointItem +} + type NetType string const ( @@ -48,6 +65,7 @@ type TimeType string const ( Hour = TimeType("Hour") Day = TimeType("Day") + Week = TimeType("Week") Month = TimeType("Month") Year = TimeType("Year") ) diff --git a/vendor/github.com/denverdino/aliyungo/ecs/client.go b/vendor/github.com/denverdino/aliyungo/ecs/client.go index d70a1554e..7ae1fb52e 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/client.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/client.go @@ -20,8 +20,7 @@ const ( // ECSDefaultEndpoint is the default API endpoint of ECS services ECSDefaultEndpoint = "https://ecs-cn-hangzhou.aliyuncs.com" ECSAPIVersion = "2014-05-26" - - ECSServiceCode = "ecs" + ECSServiceCode = "ecs" VPCDefaultEndpoint = "https://vpc.aliyuncs.com" VPCAPIVersion = "2016-04-28" @@ -37,37 +36,88 @@ func NewClient(accessKeyId, accessKeySecret string) *Client { return NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret) } +func NewClientWithRegion(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client { + client := &Client{} + client.NewInit(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret, ECSServiceCode, regionID) + return client +} + +func NewClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string) *Client { + client := &Client{} + client.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret) + return client +} + +// --------------------------------------- +// NewECSClient creates a new instance of ECS client +// --------------------------------------- func NewECSClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client { 
+ return NewECSClientWithSecurityToken(accessKeyId, accessKeySecret, "", regionID) +} + +func NewECSClientWithSecurityToken(accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client { endpoint := os.Getenv("ECS_ENDPOINT") if endpoint == "" { endpoint = ECSDefaultEndpoint } - return NewClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID) + return NewECSClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, securityToken, regionID) } -func NewClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client { - client := &Client{} - client.NewInit(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret, ECSServiceCode, regionID) - return client +func NewECSClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client { + return NewECSClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, "", regionID) } -func NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string) *Client { +func NewECSClientWithEndpointAndSecurityToken(endpoint string, accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client { client := &Client{} - client.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret) + client.WithEndpoint(endpoint). + WithVersion(ECSAPIVersion). + WithAccessKeyId(accessKeyId). + WithAccessKeySecret(accessKeySecret). + WithSecurityToken(securityToken). + WithServiceCode(ECSServiceCode). + WithRegionID(regionID). + InitClient() return client } -func NewVPCClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client { +// --------------------------------------- +// NewVPCClient creates a new instance of VPC client +// --------------------------------------- +func NewVPCClient(accessKeyId string, accessKeySecret string, regionID common.Region) *Client { + return NewVPCClientWithSecurityToken(accessKeyId, accessKeySecret, "", regionID) +} + +func NewVPCClientWithSecurityToken(accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client { endpoint := os.Getenv("VPC_ENDPOINT") if endpoint == "" { endpoint = VPCDefaultEndpoint } - return NewVPCClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID) + return NewVPCClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, securityToken, regionID) +} + +func NewVPCClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client { + return NewVPCClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, "", regionID) +} + +func NewVPCClientWithEndpointAndSecurityToken(endpoint string, accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client { + client := &Client{} + client.WithEndpoint(endpoint). + WithVersion(VPCAPIVersion). + WithAccessKeyId(accessKeyId). + WithAccessKeySecret(accessKeySecret). + WithSecurityToken(securityToken). + WithServiceCode(VPCServiceCode). + WithRegionID(regionID). 
+ InitClient() + return client } -func NewVPCClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client { +// --------------------------------------- +// NewVPCClientWithRegion creates a new instance of VPC client automatically get endpoint +// --------------------------------------- +func NewVPCClientWithRegion(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client { client := &Client{} client.NewInit(endpoint, VPCAPIVersion, accessKeyId, accessKeySecret, VPCServiceCode, regionID) return client diff --git a/vendor/github.com/denverdino/aliyungo/ecs/disks.go b/vendor/github.com/denverdino/aliyungo/ecs/disks.go index f1d1e9341..ae2c40872 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/disks.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/disks.go @@ -59,7 +59,7 @@ type DescribeDisksArgs struct { Category DiskCategory //enum for all(default) | cloud | ephemeral Status DiskStatus //enum for In_use | Available | Attaching | Detaching | Creating | ReIniting | All(default) SnapshotId string - Name string + DiskName string Portable *bool //optional DeleteWithInstance *bool //optional DeleteAutoSnapshot *bool //optional @@ -78,6 +78,7 @@ type DiskItemType struct { DiskName string Description string Type DiskType + Encrypted bool Category DiskCategory Size int ImageId string @@ -110,10 +111,7 @@ type DescribeDisksResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&describedisks func (client *Client) DescribeDisks(args *DescribeDisksArgs) (disks []DiskItemType, pagination *common.PaginationResult, err error) { - response := DescribeDisksResponse{} - - err = client.Invoke("DescribeDisks", args, &response) - + response, err := client.DescribeDisksWithRaw(args) if err != nil { return nil, nil, err } @@ -121,11 +119,24 @@ func (client *Client) DescribeDisks(args *DescribeDisksArgs) (disks []DiskItemTy return response.Disks.Disk, &response.PaginationResult, err } +func (client *Client) DescribeDisksWithRaw(args *DescribeDisksArgs) (response *DescribeDisksResponse, err error) { + response = &DescribeDisksResponse{} + + err = client.Invoke("DescribeDisks", args, response) + + if err != nil { + return nil, err + } + + return response, err +} + type CreateDiskArgs struct { RegionId common.Region ZoneId string DiskName string Description string + Encrypted bool DiskCategory DiskCategory Size int SnapshotId string @@ -231,6 +242,28 @@ func (client *Client) DetachDisk(instanceId string, diskId string) error { return err } +type ResizeDiskArgs struct { + DiskId string + NewSize int +} + +type ResizeDiskResponse struct { + common.Response +} + +// +// ResizeDisk can only support to enlarge disk size +// You can read doc at https://help.aliyun.com/document_detail/25522.html +func (client *Client) ResizeDisk(diskId string, sizeGB int) error { + args := ResizeDiskArgs{ + DiskId: diskId, + NewSize: sizeGB, + } + response := ResizeDiskResponse{} + err := client.Invoke("ResizeDisk", &args, &response) + return err +} + type ResetDiskArgs struct { DiskId string SnapshotId string diff --git a/vendor/github.com/denverdino/aliyungo/ecs/eni.go b/vendor/github.com/denverdino/aliyungo/ecs/eni.go new file mode 100644 index 000000000..698d4fa70 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/eni.go @@ -0,0 +1,183 @@ +package ecs + +import ( + "fmt" + "time" + + "github.com/denverdino/aliyungo/common" +) + +type CreateNetworkInterfaceArgs struct { + RegionId common.Region + VSwitchId 
string + PrimaryIpAddress string // optional + SecurityGroupId string + NetworkInterfaceName string // optional + Description string // optional + ClientToken string // optional +} + +type CreateNetworkInterfaceResponse struct { + common.Response + NetworkInterfaceId string +} +type DeleteNetworkInterfaceArgs struct { + RegionId common.Region + NetworkInterfaceId string +} + +type DeleteNetworkInterfaceResponse struct { + common.Response +} + +type DescribeNetworkInterfacesArgs struct { + RegionId common.Region + VSwitchId string + PrimaryIpAddress string + SecurityGroupId string + NetworkInterfaceName string + Type string + InstanceId string + NetworkInterfaceId []string `query:"list"` + PageNumber int + PageSize int +} +type NetworkInterfaceType struct { + NetworkInterfaceId string + NetworkInterfaceName string + PrimaryIpAddress string + MacAddress string + Status string + PrivateIpAddress string +} + +type DescribeNetworkInterfacesResponse struct { + common.Response + NetworkInterfaceSets struct { + NetworkInterfaceSet []NetworkInterfaceType + } + TotalCount int + PageNumber int + PageSize int +} +type AttachNetworkInterfaceArgs struct { + RegionId common.Region + NetworkInterfaceId string + InstanceId string +} + +type AttachNetworkInterfaceResponse common.Response + +type DetachNetworkInterfaceArgs AttachNetworkInterfaceArgs + +type DetachNetworkInterfaceResponse common.Response + +type ModifyNetworkInterfaceAttributeArgs struct { + RegionId common.Region + NetworkInterfaceId string + SecurityGroupId []string + NetworkInterfaceName string + Description string +} +type ModifyNetworkInterfaceAttributeResponse common.Response + +type UnassignPrivateIpAddressesArgs struct { + RegionId common.Region + NetworkInterfaceId string + PrivateIpAddress []string `query:"list"` +} + +type UnassignPrivateIpAddressesResponse common.Response + +type AssignPrivateIpAddressesArgs struct { + RegionId common.Region + NetworkInterfaceId string + PrivateIpAddress []string `query:"list"` // optional + SecondaryPrivateIpAddressCount int // optional +} + +type AssignPrivateIpAddressesResponse common.Response + +func (client *Client) CreateNetworkInterface(args *CreateNetworkInterfaceArgs) (resp *CreateNetworkInterfaceResponse, err error) { + resp = &CreateNetworkInterfaceResponse{} + err = client.Invoke("CreateNetworkInterface", args, resp) + return resp, err +} + +func (client *Client) DeleteNetworkInterface(args *DeleteNetworkInterfaceArgs) (resp *DeleteNetworkInterfaceResponse, err error) { + resp = &DeleteNetworkInterfaceResponse{} + err = client.Invoke("DeleteNetworkInterface", args, resp) + return resp, err +} + +func (client *Client) DescribeNetworkInterfaces(args *DescribeNetworkInterfacesArgs) (resp *DescribeNetworkInterfacesResponse, err error) { + resp = &DescribeNetworkInterfacesResponse{} + err = client.Invoke("DescribeNetworkInterfaces", args, resp) + return resp, err +} + +func (client *Client) AttachNetworkInterface(args *AttachNetworkInterfaceArgs) error { + resp := &AttachNetworkInterfaceResponse{} + err := client.Invoke("AttachNetworkInterface", args, resp) + return err +} + +func (client *Client) DetachNetworkInterface(args *DetachNetworkInterfaceArgs) (resp *DetachNetworkInterfaceResponse, err error) { + resp = &DetachNetworkInterfaceResponse{} + err = client.Invoke("DetachNetworkInterface", args, resp) + return resp, err +} + +func (client *Client) ModifyNetworkInterfaceAttribute(args *ModifyNetworkInterfaceAttributeArgs) (resp *ModifyNetworkInterfaceAttributeResponse, err error) { + 
resp = &ModifyNetworkInterfaceAttributeResponse{} + err = client.Invoke("ModifyNetworkInterfaceAttribute", args, resp) + return resp, err +} + +func (client *Client) UnassignPrivateIpAddresses(args *UnassignPrivateIpAddressesArgs) (resp *UnassignPrivateIpAddressesResponse, err error) { + resp = &UnassignPrivateIpAddressesResponse{} + err = client.Invoke("UnassignPrivateIpAddresses", args, resp) + return resp, err +} + +func (client *Client) AssignPrivateIpAddresses(args *AssignPrivateIpAddressesArgs) (resp *AssignPrivateIpAddressesResponse, err error) { + resp = &AssignPrivateIpAddressesResponse{} + err = client.Invoke("AssignPrivateIpAddresses", args, resp) + return resp, err +} + +// Default timeout value for WaitForInstance method +const NetworkInterfacesDefaultTimeout = 120 + +// WaitForInstance waits for instance to given status +func (client *Client) WaitForNetworkInterface(regionId common.Region, eniID string, status string, timeout int) error { + if timeout <= 0 { + timeout = NetworkInterfacesDefaultTimeout + } + for { + + eniIds := []string{eniID} + + describeNetworkInterfacesArgs := DescribeNetworkInterfacesArgs{ + RegionId: regionId, + NetworkInterfaceId: eniIds, + } + + nisResponse, err := client.DescribeNetworkInterfaces(&describeNetworkInterfacesArgs) + if err != nil { + return fmt.Errorf("Failed to describe network interface %v: %v", eniID, err) + } + + if len(nisResponse.NetworkInterfaceSets.NetworkInterfaceSet) > 0 && nisResponse.NetworkInterfaceSets.NetworkInterfaceSet[0].Status == status { + break + } + + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return fmt.Errorf("Timeout for waiting available status for network interfaces") + } + time.Sleep(DefaultWaitForInterval * time.Second) + + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go b/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go index 2a316e18e..ad716a1a5 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go @@ -79,17 +79,25 @@ func (client *Client) CreateForwardEntry(args *CreateForwardEntryArgs) (resp *Cr func (client *Client) DescribeForwardTableEntries(args *DescribeForwardTableEntriesArgs) (forwardTableEntries []ForwardTableEntrySetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeForwardTableEntriesWithRaw(args) + if err != nil { + return nil, nil, err + } + return response.ForwardTableEntries.ForwardTableEntry, &response.PaginationResult, nil +} + +func (client *Client) DescribeForwardTableEntriesWithRaw(args *DescribeForwardTableEntriesArgs) (response *DescribeForwardTableEntriesResponse, err error) { args.Validate() - response := DescribeForwardTableEntriesResponse{} + response = &DescribeForwardTableEntriesResponse{} - err = client.Invoke("DescribeForwardTableEntries", args, &response) + err = client.Invoke("DescribeForwardTableEntries", args, response) if err != nil { - return nil, nil, err + return nil, err } - return response.ForwardTableEntries.ForwardTableEntry, &response.PaginationResult, nil + return response, nil } func (client *Client) ModifyForwardEntry(args *ModifyForwardEntryArgs) error { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/images.go b/vendor/github.com/denverdino/aliyungo/ecs/images.go index 8f3dea945..e50297adc 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/images.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/images.go @@ -37,6 +37,13 @@ const ( ImageUsageNone = 
ImageUsage("none") ) +type ImageFormatType string + +const ( + RAW = ImageFormatType("RAW") + VHD = ImageFormatType("VHD") +) + // DescribeImagesArgs repsents arguments to describe images type DescribeImagesArgs struct { RegionId common.Region @@ -63,8 +70,10 @@ type DescribeImagesResponse struct { type DiskDeviceMapping struct { SnapshotId string //Why Size Field is string-type. - Size string - Device string + Size string + // Now the key Size change to DiskImageSize + DiskImageSize string + Device string //For import images Format string OSSBucket string @@ -102,16 +111,23 @@ type ImageType struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/image&describeimages func (client *Client) DescribeImages(args *DescribeImagesArgs) (images []ImageType, pagination *common.PaginationResult, err error) { - - args.Validate() - response := DescribeImagesResponse{} - err = client.Invoke("DescribeImages", args, &response) + response, err := client.DescribeImagesWithRaw(args) if err != nil { return nil, nil, err } return response.Images.Image, &response.PaginationResult, nil } +func (client *Client) DescribeImagesWithRaw(args *DescribeImagesArgs) (response *DescribeImagesResponse, err error) { + args.Validate() + response = &DescribeImagesResponse{} + err = client.Invoke("DescribeImages", args, response) + if err != nil { + return nil, err + } + return response, nil +} + // CreateImageArgs repsents arguments to create image type CreateImageArgs struct { RegionId common.Region @@ -144,6 +160,7 @@ func (client *Client) CreateImage(args *CreateImageArgs) (imageId string, err er type DeleteImageArgs struct { RegionId common.Region ImageId string + Force bool } type DeleteImageResponse struct { @@ -163,6 +180,20 @@ func (client *Client) DeleteImage(regionId common.Region, imageId string) error return client.Invoke("DeleteImage", &args, &response) } +// DeleteImage deletes Image +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/image&deleteimage +func (client *Client) DeleteImageWithForce(regionId common.Region, imageId string, force bool) error { + args := DeleteImageArgs{ + RegionId: regionId, + ImageId: imageId, + Force: force, + } + + response := &DeleteImageResponse{} + return client.Invoke("DeleteImage", &args, &response) +} + // ModifyImageSharePermission repsents arguments to share image type ModifyImageSharePermissionArgs struct { RegionId common.Region diff --git a/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go b/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go index a133806f1..d26b5666a 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go @@ -9,10 +9,19 @@ type DescribeInstanceTypesArgs struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancetypeitemtype type InstanceTypeItemType struct { - InstanceTypeId string - CpuCoreCount int - MemorySize float64 - InstanceTypeFamily string + InstanceTypeId string + CpuCoreCount int + MemorySize float64 + InstanceTypeFamily string + GPUAmount int + GPUSpec string + InitialCredit int + BaselineCredit int + EniQuantity int + EniPrivateIpAddressQuantity int + LocalStorageCapacity int + LocalStorageAmount int + LocalStorageCategory string } type DescribeInstanceTypesResponse struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/instances.go b/vendor/github.com/denverdino/aliyungo/ecs/instances.go index 5740d79ee..dd424f96b 100644 --- 
a/vendor/github.com/denverdino/aliyungo/ecs/instances.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/instances.go @@ -94,10 +94,7 @@ type DescribeInstanceStatusResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancestatus func (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) (instanceStatuses []InstanceStatusItemType, pagination *common.PaginationResult, err error) { - args.Validate() - response := DescribeInstanceStatusResponse{} - - err = client.Invoke("DescribeInstanceStatus", args, &response) + response, err := client.DescribeInstanceStatusWithRaw(args) if err == nil { return response.InstanceStatuses.InstanceStatus, &response.PaginationResult, nil @@ -106,6 +103,18 @@ func (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) ( return nil, nil, err } +func (client *Client) DescribeInstanceStatusWithRaw(args *DescribeInstanceStatusArgs) (response *DescribeInstanceStatusResponse, err error) { + args.Validate() + response = &DescribeInstanceStatusResponse{} + + err = client.Invoke("DescribeInstanceStatus", args, response) + if err != nil { + return nil, err + } + + return response, nil +} + type StopInstanceArgs struct { InstanceId string ForceStop bool @@ -215,6 +224,7 @@ type SpotStrategyType string const ( NoSpot = SpotStrategyType("NoSpot") SpotWithPriceLimit = SpotStrategyType("SpotWithPriceLimit") + SpotAsPriceGo = SpotStrategyType("SpotAsPriceGo") ) // @@ -253,7 +263,9 @@ type InstanceAttributesType struct { Tags struct { Tag []TagItemType } - SpotStrategy SpotStrategyType + SpotStrategy SpotStrategyType + SpotPriceLimit float64 + KeyPairName string } type DescribeInstanceAttributeResponse struct { @@ -408,16 +420,46 @@ type DescribeInstancesResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstances func (client *Client) DescribeInstances(args *DescribeInstancesArgs) (instances []InstanceAttributesType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeInstancesWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.Instances.Instance, &response.PaginationResult, nil +} + +func (client *Client) DescribeInstancesWithRaw(args *DescribeInstancesArgs) (response *DescribeInstancesResponse, err error) { args.Validate() - response := DescribeInstancesResponse{} + response = &DescribeInstancesResponse{} err = client.Invoke("DescribeInstances", args, &response) - - if err == nil { - return response.Instances.Instance, &response.PaginationResult, nil + if err != nil { + return nil, err } - return nil, nil, err + return response, nil +} + +type ModifyInstanceAutoReleaseTimeArgs struct { + InstanceId string + AutoReleaseTime string +} + +type ModifyInstanceAutoReleaseTimeResponse struct { + common.Response +} + +// 对给定的实例设定自动释放时间。 +// +// You can read doc at https://help.aliyun.com/document_detail/47576.html +func (client *Client) ModifyInstanceAutoReleaseTime(instanceId, time string) error { + args := ModifyInstanceAutoReleaseTimeArgs{ + InstanceId: instanceId, + AutoReleaseTime: time, + } + response := ModifyInstanceAutoReleaseTimeResponse{} + err := client.Invoke("ModifyInstanceAutoReleaseTime", &args, &response) + return err } type DeleteInstanceArgs struct { @@ -492,31 +534,43 @@ var ( IoOptimizedOptimized = IoOptimized("optimized") ) +type SecurityEnhancementStrategy string + +var ( + InactiveSecurityEnhancementStrategy = SecurityEnhancementStrategy("Active") + 
DeactiveSecurityEnhancementStrategy = SecurityEnhancementStrategy("Deactive") +) + type CreateInstanceArgs struct { - RegionId common.Region - ZoneId string - ImageId string - InstanceType string - SecurityGroupId string - InstanceName string - Description string - InternetChargeType common.InternetChargeType - InternetMaxBandwidthIn int - InternetMaxBandwidthOut int - HostName string - Password string - IoOptimized IoOptimized - SystemDisk SystemDiskType - DataDisk []DataDiskType - VSwitchId string - PrivateIpAddress string - ClientToken string - InstanceChargeType common.InstanceChargeType - Period int - UserData string - AutoRenew bool - AutoRenewPeriod int - SpotStrategy SpotStrategyType + RegionId common.Region + ZoneId string + ImageId string + InstanceType string + SecurityGroupId string + InstanceName string + Description string + InternetChargeType common.InternetChargeType + InternetMaxBandwidthIn int + InternetMaxBandwidthOut int + HostName string + Password string + IoOptimized IoOptimized + SystemDisk SystemDiskType + DataDisk []DataDiskType + VSwitchId string + PrivateIpAddress string + ClientToken string + InstanceChargeType common.InstanceChargeType + Period int + PeriodUnit common.TimeType + UserData string + AutoRenew bool + AutoRenewPeriod int + SpotStrategy SpotStrategyType + SpotPriceLimit float64 + KeyPairName string + RamRoleName string + SecurityEnhancementStrategy SecurityEnhancementStrategy } type CreateInstanceResponse struct { @@ -605,3 +659,174 @@ func (client *Client) LeaveSecurityGroup(instanceId string, securityGroupId stri err := client.Invoke("LeaveSecurityGroup", &args, &response) return err } + +type AttachInstancesArgs struct { + RegionId common.Region + RamRoleName string + InstanceIds string +} + +// AttachInstanceRamRole attach instances to ram role +// +// You can read doc at https://help.aliyun.com/document_detail/54244.html?spm=5176.doc54245.6.811.zEJcS5 +func (client *Client) AttachInstanceRamRole(args *AttachInstancesArgs) (err error) { + response := common.Response{} + err = client.Invoke("AttachInstanceRamRole", args, &response) + if err != nil { + return err + } + return nil +} + +// DetachInstanceRamRole detach instances from ram role +// +// You can read doc at https://help.aliyun.com/document_detail/54245.html?spm=5176.doc54243.6.813.bt8RB3 +func (client *Client) DetachInstanceRamRole(args *AttachInstancesArgs) (err error) { + response := common.Response{} + err = client.Invoke("DetachInstanceRamRole", args, &response) + if err != nil { + return err + } + return nil +} + +type DescribeInstanceRamRoleResponse struct { + common.Response + InstanceRamRoleSets struct { + InstanceRamRoleSet []InstanceRamRoleSetType + } +} + +type InstanceRamRoleSetType struct { + InstanceId string + RamRoleName string +} + +// DescribeInstanceRamRole +// +// You can read doc at https://help.aliyun.com/document_detail/54243.html?spm=5176.doc54245.6.812.RgNCoi +func (client *Client) DescribeInstanceRamRole(args *AttachInstancesArgs) (resp *DescribeInstanceRamRoleResponse, err error) { + response := &DescribeInstanceRamRoleResponse{} + err = client.Invoke("DescribeInstanceRamRole", args, response) + if err != nil { + return response, err + } + return response, nil +} + +type ModifyInstanceSpecArgs struct { + InstanceId string + InstanceType string + InternetMaxBandwidthOut *int + InternetMaxBandwidthIn *int + ClientToken string +} + +type ModifyInstanceSpecResponse struct { + common.Response +} + +//ModifyInstanceSpec modify instance specification +// +// Notice: 
1. An instance that was successfully modified once cannot be modified again within 5 minutes. +// 2. The API only can be used Pay-As-You-Go (PostPaid) instance +// +// You can read doc at https://www.alibabacloud.com/help/doc-detail/57633.htm +func (client *Client) ModifyInstanceSpec(args *ModifyInstanceSpecArgs) error { + response := ModifyInstanceSpecResponse{} + return client.Invoke("ModifyInstanceSpec", args, &response) +} + +type ModifyInstanceVpcAttributeArgs struct { + InstanceId string + VSwitchId string + PrivateIpAddress string +} + +type ModifyInstanceVpcAttributeResponse struct { + common.Response +} + +//ModifyInstanceVpcAttribute modify instance vswitchID and private ip address +// +// You can read doc at https://www.alibabacloud.com/help/doc-detail/25504.htm +func (client *Client) ModifyInstanceVpcAttribute(args *ModifyInstanceVpcAttributeArgs) error { + response := ModifyInstanceVpcAttributeResponse{} + return client.Invoke("ModifyInstanceVpcAttribute", args, &response) +} + +type ModifyInstanceChargeTypeArgs struct { + InstanceIds string + RegionId common.Region + Period int + PeriodUnit common.TimeType + IncludeDataDisks bool + DryRun bool + AutoPay bool + ClientToken string +} + +type ModifyInstanceChargeTypeResponse struct { + common.Response + Order string +} + +//ModifyInstanceChargeType modify instance charge type +// +// You can read doc at https://www.alibabacloud.com/help/doc-detail/25504.htm +func (client *Client) ModifyInstanceChargeType(args *ModifyInstanceChargeTypeArgs) (*ModifyInstanceChargeTypeResponse, error) { + response := &ModifyInstanceChargeTypeResponse{} + if err := client.Invoke("ModifyInstanceChargeType", args, response); err != nil { + return response, err + } + return response, nil +} + +type RenewalStatus string + +const ( + RenewAutoRenewal = RenewalStatus("AutoRenewal") + RenewNormal = RenewalStatus("Normal") + RenewNotRenewal = RenewalStatus("NotRenewal") +) + +type ModifyInstanceAutoRenewAttributeArgs struct { + InstanceId string + RegionId common.Region + Duration int + AutoRenew bool + RenewalStatus RenewalStatus +} + +// You can read doc at https://www.alibabacloud.com/help/doc-detail/52843.htm +func (client *Client) ModifyInstanceAutoRenewAttribute(args *ModifyInstanceAutoRenewAttributeArgs) error { + response := &common.Response{} + return client.Invoke("ModifyInstanceAutoRenewAttribute", args, response) +} + +type DescribeInstanceAutoRenewAttributeArgs struct { + InstanceId string + RegionId common.Region +} + +type InstanceRenewAttribute struct { + InstanceId string + Duration int + AutoRenewEnabled bool + PeriodUnit string + RenewalStatus RenewalStatus +} + +type DescribeInstanceAutoRenewAttributeResponse struct { + common.Response + InstanceRenewAttributes struct { + InstanceRenewAttribute []InstanceRenewAttribute + } +} + +// You can read doc at https://www.alibabacloud.com/help/doc-detail/52844.htm +func (client *Client) DescribeInstanceAutoRenewAttribute(args *DescribeInstanceAutoRenewAttributeArgs) (*DescribeInstanceAutoRenewAttributeResponse, error) { + response := &DescribeInstanceAutoRenewAttributeResponse{} + err := client.Invoke("DescribeInstanceAutoRenewAttribute", args, response) + return response, err +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go b/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go index dfcb74d32..d104cf885 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go @@ -28,6 +28,16 @@ type 
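The auto-renew additions above are self-contained; the sketch below (hypothetical enableAutoRenew helper, not part of the patch) shows how the new args struct and RenewalStatus constants fit together.

package example

import (
	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
)

// enableAutoRenew switches one instance to automatic renewal using the new
// ModifyInstanceAutoRenewAttribute API. Duration is the renewal length; its
// unit follows the API documentation linked in the hunk above.
func enableAutoRenew(client *ecs.Client, region common.Region, instanceID string) error {
	return client.ModifyInstanceAutoRenewAttribute(&ecs.ModifyInstanceAutoRenewAttributeArgs{
		InstanceId:    instanceID,
		RegionId:      region,
		Duration:      1,
		AutoRenew:     true,
		RenewalStatus: ecs.RenewAutoRenewal,
	})
}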
SnatTableIdType struct { SnatTableId []string } +type IpListsType struct { + IpList []IpListItem +} + +type IpListItem struct { + IpAddress string + AllocationId string + UsingStatus string +} + type BandwidthPackageIdType struct { BandwidthPackageId []string } @@ -57,6 +67,7 @@ type NatGatewaySetType struct { BandwidthPackageIds BandwidthPackageIdType ForwardTableIds ForwardTableIdType SnatTableIds SnatTableIdType + IpLists IpListsType InstanceChargeType string Name string NatGatewayId string @@ -83,17 +94,25 @@ type DescribeNatGatewaysArgs struct { func (client *Client) DescribeNatGateways(args *DescribeNatGatewaysArgs) (natGateways []NatGatewaySetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeNatGatewaysWithRaw(args) + if err == nil { + return response.NatGateways.NatGateway, &response.PaginationResult, nil + } + return nil, nil, err +} + +func (client *Client) DescribeNatGatewaysWithRaw(args *DescribeNatGatewaysArgs) (response *DescribeNatGatewayResponse, err error) { args.Validate() - response := DescribeNatGatewayResponse{} + response = &DescribeNatGatewayResponse{} - err = client.Invoke("DescribeNatGateways", args, &response) + err = client.Invoke("DescribeNatGateways", args, response) if err == nil { - return response.NatGateways.NatGateway, &response.PaginationResult, nil + return response, nil } - return nil, nil, err + return nil, err } type ModifyNatGatewayAttributeArgs struct { @@ -139,7 +158,8 @@ func (client *Client) DeleteNatGateway(args *DeleteNatGatewayArgs) error { } type DescribeBandwidthPackagesArgs struct { - RegionId common.Region + RegionId common.Region + common.Pagination BandwidthPackageId string NatGatewayId string } @@ -162,12 +182,13 @@ type DescribeBandwidthPackageType struct { type DescribeBandwidthPackagesResponse struct { common.Response + common.PaginationResult BandwidthPackages struct { BandwidthPackage []DescribeBandwidthPackageType } } -func (client *Client) DescribeBandwidthPackages(args *DescribeBandwidthPackagesArgs) ([]DescribeBandwidthPackageType, error) { +func (client *Client) DescribeBandwidthPackages(args *DescribeBandwidthPackagesArgs) (*DescribeBandwidthPackagesResponse, error) { response := &DescribeBandwidthPackagesResponse{} err := client.Invoke("DescribeBandwidthPackages", args, response) @@ -175,7 +196,7 @@ func (client *Client) DescribeBandwidthPackages(args *DescribeBandwidthPackagesA return nil, err } - return response.BandwidthPackages.BandwidthPackage, err + return response, err } type DeleteBandwidthPackageArgs struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/networks.go b/vendor/github.com/denverdino/aliyungo/ecs/networks.go index fecc7af1a..eb6b0edaf 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/networks.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/networks.go @@ -38,6 +38,7 @@ type ModifyInstanceNetworkSpec struct { InstanceId string InternetMaxBandwidthOut *int InternetMaxBandwidthIn *int + NetworkChargeType common.InternetChargeType } type ModifyInstanceNetworkSpecResponse struct { @@ -57,6 +58,7 @@ type AllocateEipAddressArgs struct { RegionId common.Region Bandwidth int InternetChargeType common.InternetChargeType + ISP string ClientToken string } @@ -81,9 +83,19 @@ func (client *Client) AllocateEipAddress(args *AllocateEipAddressArgs) (EipAddre return response.EipAddress, response.AllocationId, nil } +type EipInstanceType string + +const ( + EcsInstance = "EcsInstance" + SlbInstance = "SlbInstance" + Nat = "Nat" + HaVip = "HaVip" +) + type 
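DescribeNatGatewaysWithRaw above makes the new IpLists field on each gateway reachable. A sketch follows; listNatGatewayIPs is hypothetical, and it assumes DescribeNatGatewaysArgs carries a RegionId field as in the rest of the package.

package example

import (
	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
)

// listNatGatewayIPs collects the public IPs attached to every NAT gateway in
// a region, using the raw response so the new IpLists field is reachable.
func listNatGatewayIPs(client *ecs.Client, region common.Region) ([]string, error) {
	// RegionId on DescribeNatGatewaysArgs is assumed, matching the package's other Describe* args.
	resp, err := client.DescribeNatGatewaysWithRaw(&ecs.DescribeNatGatewaysArgs{RegionId: region})
	if err != nil {
		return nil, err
	}
	var ips []string
	for _, gw := range resp.NatGateways.NatGateway {
		for _, item := range gw.IpLists.IpList {
			ips = append(ips, item.IpAddress)
		}
	}
	return ips, nil
}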
AssociateEipAddressArgs struct { AllocationId string InstanceId string + InstanceType EipInstanceType } type AssociateEipAddressResponse struct { @@ -102,6 +114,11 @@ func (client *Client) AssociateEipAddress(allocationId string, instanceId string return client.Invoke("AssociateEipAddress", &args, &response) } +func (client *Client) NewAssociateEipAddress(args *AssociateEipAddressArgs) error { + response := ModifyInstanceNetworkSpecResponse{} + return client.Invoke("AssociateEipAddress", args, &response) +} + // Status of disks type EipStatus string @@ -112,11 +129,22 @@ const ( EipStatusAvailable = EipStatus("Available") ) +type AssociatedInstanceType string + +const ( + AssociatedInstanceTypeEcsInstance = AssociatedInstanceType("EcsInstance") + AssociatedInstanceTypeSlbInstance = AssociatedInstanceType("SlbInstance") + AssociatedInstanceTypeNat = AssociatedInstanceType("Nat") + AssociatedInstanceTypeHaVip = AssociatedInstanceType("HaVip") +) + type DescribeEipAddressesArgs struct { - RegionId common.Region - Status EipStatus //enum Associating | Unassociating | InUse | Available - EipAddress string - AllocationId string + RegionId common.Region + Status EipStatus //enum Associating | Unassociating | InUse | Available + EipAddress string + AllocationId string + AssociatedInstanceType AssociatedInstanceType //enum EcsInstance | SlbInstance | Nat | HaVip + AssociatedInstanceId string //绑定的资源的Id。 这是一个过滤器性质的参数,若不指定,则表示不适用该条件对结果进行过滤。 如果要使用该过滤器,必须同时使用AssociatedInstanceType。若InstanceType为EcsInstance,则此处填写ECS实例Id。若InstanceType为SlbInstance,则此处填写VPC类型的私网SLB 的实例ID。若InstanceType为Nat,则此处填写NAT 的实例ID。。若InstanceType为HaVip,则此处填写HaVipId。 common.Pagination } @@ -128,6 +156,7 @@ type EipAddressSetType struct { AllocationId string Status EipStatus InstanceId string + InstanceType string Bandwidth string // Why string InternetChargeType common.InternetChargeType OperationLocks OperationLocksType @@ -146,16 +175,25 @@ type DescribeEipAddressesResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&describeeipaddresses func (client *Client) DescribeEipAddresses(args *DescribeEipAddressesArgs) (eipAddresses []EipAddressSetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeEipAddressesWithRaw(args) + if err == nil { + return response.EipAddresses.EipAddress, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeEipAddressesWithRaw(args *DescribeEipAddressesArgs) (response *DescribeEipAddressesResponse, err error) { args.Validate() - response := DescribeEipAddressesResponse{} + response = &DescribeEipAddressesResponse{} - err = client.Invoke("DescribeEipAddresses", args, &response) + err = client.Invoke("DescribeEipAddresses", args, response) if err == nil { - return response.EipAddresses.EipAddress, &response.PaginationResult, nil + return response, nil } - return nil, nil, err + return nil, err } type ModifyEipAddressAttributeArgs struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go b/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go index 01f43127c..15d7c7b7d 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go @@ -76,23 +76,33 @@ type DescribeRouteTablesResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/routertable&describeroutetables func (client *Client) DescribeRouteTables(args *DescribeRouteTablesArgs) (routeTables []RouteTableSetType, 
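NewAssociateEipAddress above accepts an InstanceType, so an EIP can be bound to resources other than ECS instances. A short sketch (hypothetical bindEIPToNat helper) follows.

package example

import "github.com/denverdino/aliyungo/ecs"

// bindEIPToNat binds an allocated EIP to a NAT gateway via the new
// NewAssociateEipAddress entry point and the Nat instance type constant.
func bindEIPToNat(client *ecs.Client, allocationID, natGatewayID string) error {
	return client.NewAssociateEipAddress(&ecs.AssociateEipAddressArgs{
		AllocationId: allocationID,
		InstanceId:   natGatewayID,
		InstanceType: ecs.Nat,
	})
}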
pagination *common.PaginationResult, err error) { + response, err := client.DescribeRouteTablesWithRaw(args) + if err == nil { + return response.RouteTables.RouteTable, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeRouteTablesWithRaw(args *DescribeRouteTablesArgs) (response *DescribeRouteTablesResponse, err error) { args.Validate() - response := DescribeRouteTablesResponse{} + response = &DescribeRouteTablesResponse{} err = client.Invoke("DescribeRouteTables", args, &response) if err == nil { - return response.RouteTables.RouteTable, &response.PaginationResult, nil + return response, nil } - return nil, nil, err + return nil, err } type NextHopType string const ( - NextHopIntance = NextHopType("Instance") //Default - NextHopTunnel = NextHopType("Tunnel") + NextHopIntance = NextHopType("Instance") //Default + NextHopTunnel = NextHopType("Tunnel") + NextHopTunnelRouterInterface = NextHopType("RouterInterface") ) type CreateRouteEntryArgs struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/router_interface.go b/vendor/github.com/denverdino/aliyungo/ecs/router_interface.go new file mode 100644 index 000000000..188bac89a --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/router_interface.go @@ -0,0 +1,257 @@ +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" +) + +type EcsCommonResponse struct { + common.Response +} +type RouterType string +type InterfaceStatus string +type Role string +type Spec string + +const ( + VRouter = RouterType("VRouter") + VBR = RouterType("VBR") + + Idl = InterfaceStatus("Idl") + Active = InterfaceStatus("Active") + Inactive = InterfaceStatus("Inactive") + // 'Idle' means the router interface is not connected. 'Idl' may be a incorrect status. 
+ Idle = InterfaceStatus("Idle") + + InitiatingSide = Role("InitiatingSide") + AcceptingSide = Role("AcceptingSide") + + Small1 = Spec("Small.1") + Small2 = Spec("Small.2") + Small5 = Spec("Small.5") + Middle1 = Spec("Middle.1") + Middle2 = Spec("Middle.2") + Middle5 = Spec("Middle.5") + Large1 = Spec("Large.1") + Large2 = Spec("Large.2") +) + +type CreateRouterInterfaceArgs struct { + RegionId common.Region + OppositeRegionId common.Region + RouterType RouterType + OppositeRouterType RouterType + RouterId string + OppositeRouterId string + Role Role + Spec Spec + AccessPointId string + OppositeAccessPointId string + OppositeInterfaceId string + OppositeInterfaceOwnerId string + Name string + Description string + HealthCheckSourceIp string + HealthCheckTargetIp string +} + +type CreateRouterInterfaceResponse struct { + common.Response + RouterInterfaceId string +} + +// CreateRouterInterface create Router interface +// +// You can read doc at https://help.aliyun.com/document_detail/36032.html?spm=5176.product27706.6.664.EbBsxC +func (client *Client) CreateRouterInterface(args *CreateRouterInterfaceArgs) (response *CreateRouterInterfaceResponse, err error) { + response = &CreateRouterInterfaceResponse{} + err = client.Invoke("CreateRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type Filter struct { + Key string + Value []string +} + +type DescribeRouterInterfacesArgs struct { + RegionId common.Region + common.Pagination + Filter []Filter +} + +type RouterInterfaceItemType struct { + ChargeType string + RouterInterfaceId string + AccessPointId string + OppositeRegionId string + OppositeAccessPointId string + Role Role + Spec Spec + Name string + Description string + RouterId string + RouterType RouterType + CreationTime string + Status string + BusinessStatus string + ConnectedTime string + OppositeInterfaceId string + OppositeInterfaceSpec string + OppositeInterfaceStatus string + OppositeInterfaceBusinessStatus string + OppositeRouterId string + OppositeRouterType RouterType + OppositeInterfaceOwnerId string + HealthCheckSourceIp string + HealthCheckTargetIp string +} + +type DescribeRouterInterfacesResponse struct { + RouterInterfaceSet struct { + RouterInterfaceType []RouterInterfaceItemType + } + common.PaginationResult +} + +// DescribeRouterInterfaces describe Router interfaces +// +// You can read doc at https://help.aliyun.com/document_detail/36032.html?spm=5176.product27706.6.664.EbBsxC +func (client *Client) DescribeRouterInterfaces(args *DescribeRouterInterfacesArgs) (response *DescribeRouterInterfacesResponse, err error) { + response = &DescribeRouterInterfacesResponse{} + err = client.Invoke("DescribeRouterInterfaces", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type OperateRouterInterfaceArgs struct { + RegionId common.Region + RouterInterfaceId string +} + +// ConnectRouterInterface +// +// You can read doc at https://help.aliyun.com/document_detail/36031.html?spm=5176.doc36035.6.666.wkyljN +func (client *Client) ConnectRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("ConnectRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// ActivateRouterInterface active Router Interface +// +// You can read doc at https://help.aliyun.com/document_detail/36030.html?spm=5176.doc36031.6.667.DAuZLD +func (client *Client) 
ActivateRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("ActivateRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// DeactivateRouterInterface deactivate Router Interface +// +// You can read doc at https://help.aliyun.com/document_detail/36033.html?spm=5176.doc36030.6.668.JqCWUz +func (client *Client) DeactivateRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("DeactivateRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type ModifyRouterInterfaceSpecArgs struct { + RegionId common.Region + RouterInterfaceId string + Spec Spec +} + +type ModifyRouterInterfaceSpecResponse struct { + common.Response + Spec Spec +} + +// ModifyRouterInterfaceSpec +// +// You can read doc at https://help.aliyun.com/document_detail/36037.html?spm=5176.doc36036.6.669.McKiye +func (client *Client) ModifyRouterInterfaceSpec(args *ModifyRouterInterfaceSpecArgs) (response *ModifyRouterInterfaceSpecResponse, err error) { + response = &ModifyRouterInterfaceSpecResponse{} + err = client.Invoke("ModifyRouterInterfaceSpec", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type ModifyRouterInterfaceAttributeArgs struct { + RegionId common.Region + RouterInterfaceId string + Name string + Description string + OppositeInterfaceId string + OppositeRouterId string + OppositeInterfaceOwnerId string + HealthCheckSourceIp string + HealthCheckTargetIp string +} + +// ModifyRouterInterfaceAttribute +// +// You can read doc at https://help.aliyun.com/document_detail/36036.html?spm=5176.doc36037.6.670.Dcz3xS +func (client *Client) ModifyRouterInterfaceAttribute(args *ModifyRouterInterfaceAttributeArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("ModifyRouterInterfaceAttribute", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// DeleteRouterInterface delete Router Interface +// +// You can read doc at https://help.aliyun.com/document_detail/36034.html?spm=5176.doc36036.6.671.y2xpNt +func (client *Client) DeleteRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("DeleteRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// WaitForRouterInterface waits for router interface to given status +func (client *Client) WaitForRouterInterfaceAsyn(regionId common.Region, interfaceId string, status InterfaceStatus, timeout int) error { + if timeout <= 0 { + timeout = InstanceDefaultTimeout + } + for { + interfaces, err := client.DescribeRouterInterfaces(&DescribeRouterInterfacesArgs{ + RegionId: regionId, + Filter: []Filter{Filter{Key: "RouterInterfaceId", Value: []string{interfaceId}}}, + }) + if err != nil { + return err + } else if interfaces != nil && InterfaceStatus(interfaces.RouterInterfaceSet.RouterInterfaceType[0].Status) == status { + //TODO + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go 
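router_interface.go above is a new file covering the whole router-interface lifecycle. The sketch below (hypothetical createAndWait helper, not part of the patch) shows the intended flow: create the initiating side, then poll with WaitForRouterInterfaceAsyn until the interface reaches the expected status; the timeout is in seconds, as in the implementation above.

package example

import (
	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
)

// createAndWait creates the initiating side of a router interface and blocks
// until it reports the Idle status (created but not yet connected).
func createAndWait(client *ecs.Client, region, opposite common.Region, routerID, oppositeRouterID string) (string, error) {
	resp, err := client.CreateRouterInterface(&ecs.CreateRouterInterfaceArgs{
		RegionId:           region,
		OppositeRegionId:   opposite,
		RouterType:         ecs.VRouter,
		OppositeRouterType: ecs.VRouter,
		RouterId:           routerID,
		OppositeRouterId:   oppositeRouterID,
		Role:               ecs.InitiatingSide,
		Spec:               ecs.Large2,
	})
	if err != nil {
		return "", err
	}
	// Timeout of 300 seconds is an arbitrary example value.
	if err := client.WaitForRouterInterfaceAsyn(region, resp.RouterInterfaceId, ecs.Idle, 300); err != nil {
		return "", err
	}
	return resp.RouterInterfaceId, nil
}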
b/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go index eaec701de..dbe854349 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go @@ -6,10 +6,15 @@ import ( ) type NicType string +type Direction string const ( NicTypeInternet = NicType("internet") NicTypeIntranet = NicType("intranet") + + DirectionIngress = Direction("ingress") + DirectionEgress = Direction("egress") + DirectionAll = Direction("all") ) type IpProtocol string @@ -29,11 +34,18 @@ const ( PermissionPolicyDrop = PermissionPolicy("drop") ) +type GroupInnerAccessPolicy string + +const ( + GroupInnerAccept = GroupInnerAccessPolicy("Accept") + GroupInnerDrop = GroupInnerAccessPolicy("Drop") +) + type DescribeSecurityGroupAttributeArgs struct { SecurityGroupId string RegionId common.Region - NicType NicType //enum for internet (default) |intranet - Direction string // enum ingress egress + NicType NicType //enum for internet (default) |intranet + Direction Direction // enum ingress egress } // @@ -64,7 +76,8 @@ type DescribeSecurityGroupAttributeResponse struct { Permissions struct { Permission []PermissionType } - VpcId string + VpcId string + InnerAccessPolicy GroupInnerAccessPolicy } // @@ -108,16 +121,25 @@ type DescribeSecurityGroupsResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&describesecuritygroups func (client *Client) DescribeSecurityGroups(args *DescribeSecurityGroupsArgs) (securityGroupItems []SecurityGroupItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeSecurityGroupsWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.SecurityGroups.SecurityGroup, &response.PaginationResult, nil +} + +func (client *Client) DescribeSecurityGroupsWithRaw(args *DescribeSecurityGroupsArgs) (response *DescribeSecurityGroupsResponse, err error) { args.Validate() - response := DescribeSecurityGroupsResponse{} + response = &DescribeSecurityGroupsResponse{} - err = client.Invoke("DescribeSecurityGroups", args, &response) + err = client.Invoke("DescribeSecurityGroups", args, response) if err != nil { - return nil, nil, err + return nil, err } - return response.SecurityGroups.SecurityGroup, &response.PaginationResult, nil + return response, nil } type CreateSecurityGroupArgs struct { @@ -188,6 +210,21 @@ func (client *Client) ModifySecurityGroupAttribute(args *ModifySecurityGroupAttr return err } +type ModifySecurityGroupPolicyArgs struct { + RegionId common.Region + SecurityGroupId string + InnerAccessPolicy GroupInnerAccessPolicy +} + +// ModifySecurityGroupPolicy modifies inner access policy of security group +// +// You can read doc at https://www.alibabacloud.com/help/doc-detail/57315.htm +func (client *Client) ModifySecurityGroupPolicy(args *ModifySecurityGroupPolicyArgs) error { + response := common.Response{} + err := client.Invoke("ModifySecurityGroupPolicy", args, &response) + return err +} + type AuthorizeSecurityGroupArgs struct { SecurityGroupId string RegionId common.Region diff --git a/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go b/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go index fb6f9c8e1..0b7cbe87b 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go @@ -26,6 +26,8 @@ type SnapshotType struct { SourceDiskSize int SourceDiskType string //enum for System | Data ProductCode string + Status string + Usage string 
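ModifySecurityGroupPolicy above controls the new intra-group access policy. A one-call sketch follows (hypothetical dropInnerAccess helper).

package example

import (
	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
)

// dropInnerAccess sets the security group's inner access policy to Drop,
// using the ModifySecurityGroupPolicy API added above.
func dropInnerAccess(client *ecs.Client, region common.Region, groupID string) error {
	return client.ModifySecurityGroupPolicy(&ecs.ModifySecurityGroupPolicyArgs{
		RegionId:          region,
		SecurityGroupId:   groupID,
		InnerAccessPolicy: ecs.GroupInnerDrop,
	})
}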
CreationTime util.ISO6801Time } @@ -41,15 +43,24 @@ type DescribeSnapshotsResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/snapshot&describesnapshots func (client *Client) DescribeSnapshots(args *DescribeSnapshotsArgs) (snapshots []SnapshotType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeSnapshotsWithRaw(args) + if err != nil { + return nil, nil, err + } + return response.Snapshots.Snapshot, &response.PaginationResult, nil + +} + +func (client *Client) DescribeSnapshotsWithRaw(args *DescribeSnapshotsArgs) (response *DescribeSnapshotsResponse, err error) { args.Validate() - response := DescribeSnapshotsResponse{} + response = &DescribeSnapshotsResponse{} - err = client.Invoke("DescribeSnapshots", args, &response) + err = client.Invoke("DescribeSnapshots", args, response) if err != nil { - return nil, nil, err + return nil, err } - return response.Snapshots.Snapshot, &response.PaginationResult, nil + return response, nil } diff --git a/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go b/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go index aa75574c3..c2e20df85 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go @@ -70,17 +70,25 @@ func (client *Client) CreateSnatEntry(args *CreateSnatEntryArgs) (resp *CreateSn func (client *Client) DescribeSnatTableEntries(args *DescribeSnatTableEntriesArgs) (snatTableEntries []SnatEntrySetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeSnatTableEntriesWithRaw(args) + if err != nil { + return nil, nil, err + } + return response.SnatTableEntries.SnatTableEntry, &response.PaginationResult, nil +} + +func (client *Client) DescribeSnatTableEntriesWithRaw(args *DescribeSnatTableEntriesArgs) (response *DescribeSnatTableEntriesResponse, err error) { args.Validate() - response := DescribeSnatTableEntriesResponse{} + response = &DescribeSnatTableEntriesResponse{} - err = client.Invoke("DescribeSnatTableEntries", args, &response) + err = client.Invoke("DescribeSnatTableEntries", args, response) if err != nil { - return nil, nil, err + return nil, err } - return response.SnatTableEntries.SnatTableEntry, &response.PaginationResult, nil + return response, nil } func (client *Client) ModifySnatEntry(args *ModifySnatEntryArgs) error { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go b/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go index bd742442e..9a0ad2e57 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go @@ -19,7 +19,7 @@ type CreateKeyPairResponse struct { // CreateKeyPair creates keypair // // You can read doc at https://help.aliyun.com/document_detail/51771.html?spm=5176.doc51775.6.910.cedjfr -func (client *Client) CreateKeyPair(args *CreateKeyPairArgs) (resp *CreateKeyPairResponse,err error) { +func (client *Client) CreateKeyPair(args *CreateKeyPairArgs) (resp *CreateKeyPairResponse, err error) { response := CreateKeyPairResponse{} err = client.Invoke("CreateKeyPair", args, &response) if err != nil { @@ -43,7 +43,7 @@ type ImportKeyPairResponse struct { // ImportKeyPair import keypair // // You can read doc at https://help.aliyun.com/document_detail/51774.html?spm=5176.doc51771.6.911.BicQq2 -func (client *Client) ImportKeyPair(args *ImportKeyPairArgs) (resp *ImportKeyPairResponse,err error) { +func (client *Client) ImportKeyPair(args 
*ImportKeyPairArgs) (resp *ImportKeyPairResponse, err error) { response := ImportKeyPairResponse{} err = client.Invoke("ImportKeyPair", args, &response) if err != nil { @@ -69,18 +69,15 @@ type DescribeKeyPairsResponse struct { common.PaginationResult RegionId common.Region KeyPairs struct { - KeyPair []KeyPairItemType + KeyPair []KeyPairItemType } } // DescribeKeyPairs describe keypairs // // You can read doc at https://help.aliyun.com/document_detail/51773.html?spm=5176.doc51774.6.912.lyE0iX -func (client *Client) DescribeKeyPairs(args *DescribeKeyPairsArgs) (KeyPairs []KeyPairItemType, pagination *common.PaginationResult, err error) { - response := DescribeKeyPairsResponse{} - - err = client.Invoke("DescribeKeyPairs", args, &response) - +func (client *Client) DescribeKeyPairs(args *DescribeKeyPairsArgs) (KeyPairs []KeyPairItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeKeyPairsWithRaw(args) if err != nil { return nil, nil, err } @@ -88,6 +85,18 @@ func (client *Client) DescribeKeyPairs(args *DescribeKeyPairsArgs) (KeyPairs [ return response.KeyPairs.KeyPair, &response.PaginationResult, err } +func (client *Client) DescribeKeyPairsWithRaw(args *DescribeKeyPairsArgs) (response *DescribeKeyPairsResponse, err error) { + response = &DescribeKeyPairsResponse{} + + err = client.Invoke("DescribeKeyPairs", args, response) + + if err != nil { + return nil, err + } + + return response, err +} + type AttachKeyPairArgs struct { RegionId common.Region KeyPairName string @@ -140,5 +149,3 @@ func (client *Client) DeleteKeyPairs(args *DeleteKeyPairsArgs) (err error) { } return nil } - - diff --git a/vendor/github.com/denverdino/aliyungo/ecs/tags.go b/vendor/github.com/denverdino/aliyungo/ecs/tags.go index 5ffd4931a..fb2519a1d 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/tags.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/tags.go @@ -76,15 +76,23 @@ type DescribeResourceByTagsResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/tags&describeresourcebytags func (client *Client) DescribeResourceByTags(args *DescribeResourceByTagsArgs) (resources []ResourceItemType, pagination *common.PaginationResult, err error) { - args.Validate() - response := DescribeResourceByTagsResponse{} - err = client.Invoke("DescribeResourceByTags", args, &response) + response, err := client.DescribeResourceByTagsWithRaw(args) if err != nil { return nil, nil, err } return response.Resources.Resource, &response.PaginationResult, nil } +func (client *Client) DescribeResourceByTagsWithRaw(args *DescribeResourceByTagsArgs) (response *DescribeResourceByTagsResponse, err error) { + args.Validate() + response = &DescribeResourceByTagsResponse{} + err = client.Invoke("DescribeResourceByTags", args, response) + if err != nil { + return nil, err + } + return response, nil +} + type TagItemType struct { TagKey string TagValue string diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go b/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go index 80faf21ca..d66fe1027 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go @@ -95,16 +95,24 @@ type DescribeVpcsResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&describevpcs func (client *Client) DescribeVpcs(args *DescribeVpcsArgs) (vpcs []VpcSetType, pagination *common.PaginationResult, err error) { - args.Validate() - response := DescribeVpcsResponse{} + response, err := 
client.DescribeVpcsWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.Vpcs.Vpc, &response.PaginationResult, nil +} - err = client.Invoke("DescribeVpcs", args, &response) +func (client *Client) DescribeVpcsWithRaw(args *DescribeVpcsArgs) (response *DescribeVpcsResponse, err error) { + args.Validate() + response = &DescribeVpcsResponse{} - if err == nil { - return response.Vpcs.Vpc, &response.PaginationResult, nil + err = client.Invoke("DescribeVpcs", args, response) + if err != nil { + return nil, err } - return nil, nil, err + return response, err } type ModifyVpcAttributeArgs struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go b/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go index 059a324bd..04d43daa8 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go @@ -37,16 +37,25 @@ type DescribeVRoutersResponse struct { // // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vrouter&describevrouters func (client *Client) DescribeVRouters(args *DescribeVRoutersArgs) (vrouters []VRouterSetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeVRoutersWithRaw(args) + if err == nil { + return response.VRouters.VRouter, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeVRoutersWithRaw(args *DescribeVRoutersArgs) (response *DescribeVRoutersResponse, err error) { args.Validate() - response := DescribeVRoutersResponse{} + response = &DescribeVRoutersResponse{} - err = client.Invoke("DescribeVRouters", args, &response) + err = client.Invoke("DescribeVRouters", args, response) if err == nil { - return response.VRouters.VRouter, &response.PaginationResult, nil + return response, nil } - return nil, nil, err + return nil, err } type ModifyVRouterAttributeArgs struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go b/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go index 8a879ec80..22d5ec8ea 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go @@ -53,6 +53,7 @@ func (client *Client) DeleteVSwitch(VSwitchId string) error { } type DescribeVSwitchesArgs struct { + RegionId common.Region VpcId string VSwitchId string ZoneId string @@ -94,15 +95,25 @@ type DescribeVSwitchesResponse struct { // You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vswitch&describevswitches func (client *Client) DescribeVSwitches(args *DescribeVSwitchesArgs) (vswitches []VSwitchSetType, pagination *common.PaginationResult, err error) { args.Validate() - response := DescribeVSwitchesResponse{} + response, err := client.DescribeVSwitchesWithRaw(args) + if err == nil { + return response.VSwitches.VSwitch, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeVSwitchesWithRaw(args *DescribeVSwitchesArgs) (response *DescribeVSwitchesResponse, err error) { + args.Validate() + response = &DescribeVSwitchesResponse{} err = client.Invoke("DescribeVSwitches", args, &response) if err == nil { - return response.VSwitches.VSwitch, &response.PaginationResult, nil + return response, nil } - return nil, nil, err + return nil, err } type ModifyVSwitchAttributeArgs struct { diff --git a/vendor/github.com/denverdino/aliyungo/ecs/zones.go b/vendor/github.com/denverdino/aliyungo/ecs/zones.go index 4818c760f..919e363a8 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/zones.go 
+++ b/vendor/github.com/denverdino/aliyungo/ecs/zones.go @@ -1,6 +1,8 @@ package ecs -import "github.com/denverdino/aliyungo/common" +import ( + "github.com/denverdino/aliyungo/common" +) type ResourceType string @@ -11,6 +13,35 @@ const ( ResourceTypeIOOptimizedInstance = ResourceType("IoOptimized") ) +// The sub-item of the type AvailableResourcesType +type SupportedResourceType string + +const ( + SupportedInstanceType = SupportedResourceType("supportedInstanceType") + SupportedInstanceTypeFamily = SupportedResourceType("supportedInstanceTypeFamily") + SupportedInstanceGeneration = SupportedResourceType("supportedInstanceGeneration") + SupportedSystemDiskCategory = SupportedResourceType("supportedSystemDiskCategory") + SupportedDataDiskCategory = SupportedResourceType("supportedDataDiskCategory") + SupportedNetworkCategory = SupportedResourceType("supportedNetworkCategory") +) + +// +// You can read doc at https://help.aliyun.com/document_detail/25670.html?spm=5176.doc25640.2.1.J24zQt +type ResourcesInfoType struct { + ResourcesInfo []AvailableResourcesType +} + +// Because the sub-item of AvailableResourcesType starts with supported and golang struct cann't refer them, this uses map to parse ResourcesInfo +type AvailableResourcesType struct { + IoOptimized bool + NetworkTypes map[SupportedResourceType][]string + InstanceGenerations map[SupportedResourceType][]string + InstanceTypeFamilies map[SupportedResourceType][]string + InstanceTypes map[SupportedResourceType][]string + SystemDiskCategories map[SupportedResourceType][]DiskCategory + DataDiskCategories map[SupportedResourceType][]DiskCategory +} + type DescribeZonesArgs struct { RegionId common.Region } @@ -36,6 +67,7 @@ type AvailableInstanceTypesType struct { type ZoneType struct { ZoneId string LocalName string + AvailableResources ResourcesInfoType AvailableInstanceTypes AvailableInstanceTypesType AvailableResourceCreation AvailableResourceCreationType AvailableDiskCategories AvailableDiskCategoriesType @@ -50,16 +82,77 @@ type DescribeZonesResponse struct { // DescribeZones describes zones func (client *Client) DescribeZones(regionId common.Region) (zones []ZoneType, err error) { + response, err := client.DescribeZonesWithRaw(regionId) + if err == nil { + return response.Zones.Zone, nil + } + + return []ZoneType{}, err +} + +func (client *Client) DescribeZonesWithRaw(regionId common.Region) (response *DescribeZonesResponse, err error) { args := DescribeZonesArgs{ RegionId: regionId, } - response := DescribeZonesResponse{} + response = &DescribeZonesResponse{} - err = client.Invoke("DescribeZones", &args, &response) + err = client.Invoke("DescribeZones", &args, response) if err == nil { - return response.Zones.Zone, nil + return response, nil } - return []ZoneType{}, err + return nil, err +} + +type DescribeAvailableResourceArgs struct { + RegionId string + DestinationResource string + ZoneId string + InstanceChargeType string + SpotStrategy string + IoOptimized string + InstanceType string + SystemDiskCategory string + DataDiskCategory string + NetworkCategory string +} + +type DescribeAvailableResourceResponse struct { + common.Response + AvailableZones struct { + AvailableZone []AvailableZoneType + } +} + +type AvailableZoneType struct { + RegionId string + ZoneId string + Status string + AvailableResources struct { + AvailableResource []NewAvailableResourcesType + } +} + +type NewAvailableResourcesType struct { + Type string + SupportedResources struct { + SupportedResource []SupportedResourcesType + } +} + +type 
SupportedResourcesType struct { + Value string + Status string + Min string + Max string + Unit string +} + +// https://www.alibabacloud.com/help/doc-detail/66186.htm +func (client *Client) DescribeAvailableResource(args *DescribeAvailableResourceArgs) (response *DescribeAvailableResourceResponse, err error) { + + response = &DescribeAvailableResourceResponse{} + err = client.Invoke("DescribeAvailableResource", args, response) + return response, err } diff --git a/vendor/github.com/denverdino/aliyungo/metadata/client.go b/vendor/github.com/denverdino/aliyungo/metadata/client.go index bcc7cb570..6019162b7 100644 --- a/vendor/github.com/denverdino/aliyungo/metadata/client.go +++ b/vendor/github.com/denverdino/aliyungo/metadata/client.go @@ -3,7 +3,6 @@ package metadata import ( "errors" "fmt" - "github.com/denverdino/aliyungo/util" "io" "io/ioutil" "net" @@ -11,10 +10,13 @@ import ( "net/url" "strings" "time" -) -type Request struct { -} + "encoding/json" + "reflect" + + "github.com/denverdino/aliyungo/util" + "os" +) const ( ENDPOINT = "http://100.100.100.200" @@ -41,18 +43,24 @@ const ( VPC_ID = "vpc-id" VSWITCH_CIDR_BLOCK = "vswitch-cidr-block" VSWITCH_ID = "vswitch-id" + ZONE = "zone-id" + RAM_SECURITY = "Ram/security-credentials" ) -type IMetaDataClient interface { - Version(version string) IMetaDataClient - ResourceType(rtype string) IMetaDataClient - Resource(resource string) IMetaDataClient - Go() ([]string, error) +type IMetaDataRequest interface { + Version(version string) IMetaDataRequest + ResourceType(rtype string) IMetaDataRequest + Resource(resource string) IMetaDataRequest + SubResource(sub string) IMetaDataRequest Url() (string, error) + Do(api interface{}) error } type MetaData struct { - c IMetaDataClient + // mock for unit test. + mock requestMock + + client *http.Client } func NewMetaData(client *http.Client) *MetaData { @@ -60,194 +68,248 @@ func NewMetaData(client *http.Client) *MetaData { client = &http.Client{} } return &MetaData{ - c: &MetaDataClient{client: client}, + client: client, } } -func (m *MetaData) HostName() (string, error) { +func NewMockMetaData(client *http.Client, sendRequest requestMock) *MetaData { + if client == nil { + client = &http.Client{} + } + return &MetaData{ + client: client, + mock: sendRequest, + } +} - hostname, err := m.c.Resource(HOSTNAME).Go() +func (m *MetaData) New() *MetaDataRequest { + return &MetaDataRequest{ + client: m.client, + sendRequest: m.mock, + } +} + +func (m *MetaData) HostName() (string, error) { + var hostname ResultList + err := m.New().Resource(HOSTNAME).Do(&hostname) if err != nil { return "", err } - return hostname[0], nil + return hostname.result[0], nil } func (m *MetaData) ImageID() (string, error) { - - image, err := m.c.Resource(IMAGE_ID).Go() + var image ResultList + err := m.New().Resource(IMAGE_ID).Do(&image) if err != nil { return "", err } - return image[0], err + return image.result[0], err } func (m *MetaData) InstanceID() (string, error) { - - instanceid, err := m.c.Resource(INSTANCE_ID).Go() + var instanceid ResultList + err := m.New().Resource(INSTANCE_ID).Do(&instanceid) if err != nil { return "", err } - return instanceid[0], err + return instanceid.result[0], err } func (m *MetaData) Mac() (string, error) { - - mac, err := m.c.Resource(MAC).Go() + var mac ResultList + err := m.New().Resource(MAC).Do(&mac) if err != nil { return "", err } - return mac[0], nil + return mac.result[0], nil } func (m *MetaData) NetworkType() (string, error) { - - network, err := m.c.Resource(NETWORK_TYPE).Go() + 
var network ResultList + err := m.New().Resource(NETWORK_TYPE).Do(&network) if err != nil { return "", err } - return network[0], nil + return network.result[0], nil } func (m *MetaData) OwnerAccountID() (string, error) { - - owner, err := m.c.Resource(OWNER_ACCOUNT_ID).Go() + var owner ResultList + err := m.New().Resource(OWNER_ACCOUNT_ID).Do(&owner) if err != nil { return "", err } - return owner[0], nil + return owner.result[0], nil } func (m *MetaData) PrivateIPv4() (string, error) { - - private, err := m.c.Resource(PRIVATE_IPV4).Go() + var private ResultList + err := m.New().Resource(PRIVATE_IPV4).Do(&private) if err != nil { return "", err } - return private[0], nil + return private.result[0], nil } func (m *MetaData) Region() (string, error) { - - region, err := m.c.Resource(REGION).Go() + var region ResultList + err := m.New().Resource(REGION).Do(®ion) if err != nil { return "", err } - return region[0], nil + return region.result[0], nil } func (m *MetaData) SerialNumber() (string, error) { - - serial, err := m.c.Resource(SERIAL_NUMBER).Go() + var serial ResultList + err := m.New().Resource(SERIAL_NUMBER).Do(&serial) if err != nil { return "", err } - return serial[0], nil + return serial.result[0], nil } func (m *MetaData) SourceAddress() (string, error) { - - source, err := m.c.Resource(SOURCE_ADDRESS).Go() + var source ResultList + err := m.New().Resource(SOURCE_ADDRESS).Do(&source) if err != nil { return "", err } - return source[0], nil + return source.result[0], nil } func (m *MetaData) VpcCIDRBlock() (string, error) { - - vpcCIDR, err := m.c.Resource(VPC_CIDR_BLOCK).Go() + var vpcCIDR ResultList + err := m.New().Resource(VPC_CIDR_BLOCK).Do(&vpcCIDR) if err != nil { return "", err } - return vpcCIDR[0], err + return vpcCIDR.result[0], err } func (m *MetaData) VpcID() (string, error) { - - vpcId, err := m.c.Resource(VPC_ID).Go() + var vpcId ResultList + err := m.New().Resource(VPC_ID).Do(&vpcId) if err != nil { return "", err } - return vpcId[0], err + return vpcId.result[0], err } func (m *MetaData) VswitchCIDRBlock() (string, error) { - - cidr, err := m.c.Resource(VSWITCH_CIDR_BLOCK).Go() + var cidr ResultList + err := m.New().Resource(VSWITCH_CIDR_BLOCK).Do(&cidr) if err != nil { return "", err } - return cidr[0], err + return cidr.result[0], err } func (m *MetaData) VswitchID() (string, error) { - - vswithcid, err := m.c.Resource(VSWITCH_ID).Go() + var vswithcid ResultList + err := m.New().Resource(VSWITCH_ID).Do(&vswithcid) if err != nil { return "", err } - return vswithcid[0], err + return vswithcid.result[0], err } func (m *MetaData) EIPv4() (string, error) { - - eip, err := m.c.Resource(EIPV4).Go() + var eip ResultList + err := m.New().Resource(EIPV4).Do(&eip) if err != nil { return "", err } - return eip[0], nil + return eip.result[0], nil } func (m *MetaData) DNSNameServers() ([]string, error) { - - data, err := m.c.Resource(DNS_NAMESERVERS).Go() + var data ResultList + err := m.New().Resource(DNS_NAMESERVERS).Do(&data) if err != nil { return []string{}, err } - return data, nil + return data.result, nil } func (m *MetaData) NTPConfigServers() ([]string, error) { - - data, err := m.c.Resource(NTP_CONF_SERVERS).Go() + var data ResultList + err := m.New().Resource(NTP_CONF_SERVERS).Do(&data) if err != nil { return []string{}, err } - return data, nil + return data.result, nil +} + +func (m *MetaData) Zone() (string, error) { + var zone ResultList + err := m.New().Resource(ZONE).Do(&zone) + if err != nil { + return "", err + } + return zone.result[0], nil +} + +func 
(m *MetaData) RoleName() (string, error) { + var roleName ResultList + err := m.New().Resource("ram/security-credentials/").Do(&roleName) + if err != nil { + return "", err + } + return roleName.result[0], nil } +func (m *MetaData) RamRoleToken(role string) (RoleAuth, error) { + var roleauth RoleAuth + err := m.New().Resource(RAM_SECURITY).SubResource(role).Do(&roleauth) + if err != nil { + return RoleAuth{}, err + } + return roleauth, nil +} + +type requestMock func(resource string) (string, error) + // -type MetaDataClient struct { +type MetaDataRequest struct { version string resourceType string resource string + subResource string client *http.Client + + sendRequest requestMock } -func (vpc *MetaDataClient) Version(version string) IMetaDataClient { +func (vpc *MetaDataRequest) Version(version string) IMetaDataRequest { vpc.version = version return vpc } -func (vpc *MetaDataClient) ResourceType(rtype string) IMetaDataClient { +func (vpc *MetaDataRequest) ResourceType(rtype string) IMetaDataRequest { vpc.resourceType = rtype return vpc } -func (vpc *MetaDataClient) Resource(resource string) IMetaDataClient { +func (vpc *MetaDataRequest) Resource(resource string) IMetaDataRequest { vpc.resource = resource return vpc } +func (vpc *MetaDataRequest) SubResource(sub string) IMetaDataRequest { + vpc.subResource = sub + return vpc +} + var retry = util.AttemptStrategy{ Min: 5, Total: 5 * time.Second, Delay: 200 * time.Millisecond, } -func (vpc *MetaDataClient) Url() (string, error) { +func (vpc *MetaDataRequest) Url() (string, error) { if vpc.version == "" { vpc.version = "latest" } @@ -257,46 +319,75 @@ func (vpc *MetaDataClient) Url() (string, error) { if vpc.resource == "" { return "", errors.New("the resource you want to visit must not be nil!") } - return fmt.Sprintf("%s/%s/%s/%s", ENDPOINT, vpc.version, vpc.resourceType, vpc.resource), nil + endpoint := os.Getenv("METADATA_ENDPOINT") + if endpoint == "" { + endpoint = ENDPOINT + } + r := fmt.Sprintf("%s/%s/%s/%s", endpoint, vpc.version, vpc.resourceType, vpc.resource) + if vpc.subResource == "" { + return r, nil + } + return fmt.Sprintf("%s/%s", r, vpc.subResource), nil } -func (vpc *MetaDataClient) Go() (resu []string, err error) { +func (vpc *MetaDataRequest) Do(api interface{}) (err error) { + var res = "" for r := retry.Start(); r.Next(); { - resu, err = vpc.send() + if vpc.sendRequest != nil { + res, err = vpc.sendRequest(vpc.resource) + } else { + res, err = vpc.send() + } if !shouldRetry(err) { break } } - return resu, err + if err != nil { + return err + } + return vpc.Decode(res, api) } -func (vpc *MetaDataClient) send() ([]string, error) { +func (vpc *MetaDataRequest) Decode(data string, api interface{}) error { + if data == "" { + url, _ := vpc.Url() + return errors.New(fmt.Sprintf("metadata: alivpc decode data must not be nil. 
url=[%s]\n", url)) + } + switch api.(type) { + case *ResultList: + api.(*ResultList).result = strings.Split(data, "\n") + return nil + case *RoleAuth: + return json.Unmarshal([]byte(data), api) + default: + return errors.New(fmt.Sprintf("metadata: unknow type to decode, type=%s\n", reflect.TypeOf(api))) + } +} + +func (vpc *MetaDataRequest) send() (string, error) { url, err := vpc.Url() if err != nil { - return []string{}, err + return "", err } requ, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { - return []string{}, err + return "", err } resp, err := vpc.client.Do(requ) if err != nil { - return nil, err + return "", err } if resp.StatusCode != 200 { - return nil, err + return "", fmt.Errorf("Aliyun Metadata API Error: Status Code: %d", resp.StatusCode) } defer resp.Body.Close() data, err := ioutil.ReadAll(resp.Body) if err != nil { - return []string{}, err - } - if string(data) == "" { - return []string{""}, nil + return "", err } - return strings.Split(string(data), "\n"), nil + return string(data), nil } type TimeoutError interface { @@ -342,3 +433,16 @@ func shouldRetry(err error) bool { } return false } + +type ResultList struct { + result []string +} + +type RoleAuth struct { + AccessKeyId string + AccessKeySecret string + Expiration time.Time + SecurityToken string + LastUpdated time.Time + Code string +} diff --git a/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/denverdino/aliyungo/util/encoding.go index 8cb588288..ec1a5b137 100644 --- a/vendor/github.com/denverdino/aliyungo/util/encoding.go +++ b/vendor/github.com/denverdino/aliyungo/util/encoding.go @@ -62,28 +62,32 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { fieldName := elemType.Field(i).Name anonymous := elemType.Field(i).Anonymous + tag := elemType.Field(i).Tag.Get("query") + argName := elemType.Field(i).Tag.Get("ArgName") field := elem.Field(i) // TODO Use Tag for validation // tag := typ.Field(i).Tag.Get("tagname") kind := field.Kind() + isPtr := false if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { continue } if kind == reflect.Ptr { field = field.Elem() kind = field.Kind() + isPtr = true } var value string //switch field.Interface().(type) { switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i := field.Int() - if i != 0 { + if i != 0 || isPtr { value = strconv.FormatInt(i, 10) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: i := field.Uint() - if i != 0 { + if i != 0 || isPtr { value = strconv.FormatUint(i, 10) } case reflect.Float32: @@ -114,15 +118,26 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { case reflect.String: l := field.Len() if l > 0 { - strArray := make([]string, l) - for i := 0; i < l; i++ { - strArray[i] = field.Index(i).String() - } - bytes, err := json.Marshal(strArray) - if err == nil { - value = string(bytes) + if tag == "list" { + name := argName + if argName == "" { + name = fieldName + } + for i := 0; i < l; i++ { + valueName := fmt.Sprintf("%s.%d", name, (i + 1)) + values.Set(valueName, field.Index(i).String()) + } } else { - log.Printf("Failed to convert JSON: %v", err) + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } } } default: 
@@ -160,8 +175,8 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { } } if value != "" { - name := elemType.Field(i).Tag.Get("ArgName") - if name == "" { + name := argName + if argName == "" { name = fieldName } if prefix != "" { @@ -197,12 +212,14 @@ func setQueryValuesByFlattenMethod(i interface{}, values *url.Values, prefix str // tag := typ.Field(i).Tag.Get("tagname") kind := field.Kind() + isPtr := false if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { continue } if kind == reflect.Ptr { field = field.Elem() kind = field.Kind() + isPtr = true } var value string @@ -210,12 +227,12 @@ func setQueryValuesByFlattenMethod(i interface{}, values *url.Values, prefix str switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i := field.Int() - if i != 0 { + if i != 0 || isPtr { value = strconv.FormatInt(i, 10) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: i := field.Uint() - if i != 0 { + if i != 0 || isPtr { value = strconv.FormatUint(i, 10) } case reflect.Float32: diff --git a/vendor/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/denverdino/aliyungo/util/signature.go index a00b27c19..1be6d39c7 100644 --- a/vendor/github.com/denverdino/aliyungo/util/signature.go +++ b/vendor/github.com/denverdino/aliyungo/util/signature.go @@ -35,6 +35,5 @@ func CreateSignatureForRequest(method string, values *url.Values, accessKeySecre canonicalizedQueryString := percentReplace(values.Encode()) stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) - return CreateSignature(stringToSign, accessKeySecret) } diff --git a/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/denverdino/aliyungo/util/util.go index dd68214e3..15a990da1 100644 --- a/vendor/github.com/denverdino/aliyungo/util/util.go +++ b/vendor/github.com/denverdino/aliyungo/util/util.go @@ -4,6 +4,8 @@ import ( "bytes" srand "crypto/rand" "encoding/binary" + "encoding/json" + "fmt" "math/rand" "net/http" "net/url" @@ -64,6 +66,34 @@ func Encode(v url.Values) string { return buf.String() } +// Like Encode, but key and value are not escaped +func EncodeWithoutEscape(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := k + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + if v != "" { + buf.WriteString("=") + buf.WriteString(v) + } + } + } + return buf.String() +} + func GetGMTime() string { return time.Now().UTC().Format(http.TimeFormat) } @@ -145,3 +175,11 @@ func GenerateRandomECSPassword() string { return string(s) } + +func PrettyJson(object interface{}) string { + b, err := json.MarshalIndent(object, "", " ") + if err != nil { + fmt.Printf("ERROR: PrettyJson, %v\n %s\n", err, b) + } + return string(b) +} diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod deleted file mode 100644 index 591884e91..000000000 --- a/vendor/github.com/go-logr/logr/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/go-logr/logr - -go 1.14 diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000..0b4659b73 --- /dev/null +++ 
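The metadata client rewrite above adds RoleName and RamRoleToken on top of the new request builder. A sketch of the credential flow follows (hypothetical fetchRAMToken helper); passing nil to NewMetaData falls back to a default http.Client, as in the code above.

package example

import "github.com/denverdino/aliyungo/metadata"

// fetchRAMToken reads the RAM role bound to the current ECS instance and
// exchanges it for temporary credentials via the instance metadata service.
func fetchRAMToken() (metadata.RoleAuth, error) {
	meta := metadata.NewMetaData(nil) // nil selects a default http.Client
	role, err := meta.RoleName()
	if err != nil {
		return metadata.RoleAuth{}, err
	}
	return meta.RamRoleToken(role)
}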
b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..081c86fa8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. 
+ + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). 
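The field extensions described above (nullable, embed, customtype, customname, casttype) can also be read back programmatically through the helper functions this package ships (IsNullable, IsEmbed, GetCustomType, GetCustomName). Below is a minimal, illustrative sketch of doing so from outside the package; inspectField and the dummy descriptor in main are hypothetical names for illustration only, not part of gogoproto:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/gogoproto"
        proto "github.com/gogo/protobuf/proto"
        descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    // inspectField prints how the gogoproto field options would affect
    // code generation for a single field descriptor.
    func inspectField(field *descriptor.FieldDescriptorProto) {
        fmt.Printf("%s: nullable=%v embed=%v customtype=%q customname=%q\n",
            field.GetName(),
            gogoproto.IsNullable(field),    // true unless (gogoproto.nullable) = false
            gogoproto.IsEmbed(field),       // true when (gogoproto.embed) = true
            gogoproto.GetCustomType(field), // e.g. "github.com/gogo/protobuf/test/custom.Uuid"
            gogoproto.GetCustomName(field)) // empty unless (gogoproto.customname) is set
    }

    func main() {
        // A bare field with empty options: every helper falls back to its default.
        inspectField(&descriptor.FieldDescriptorProto{
            Name:    proto.String("Description"),
            Options: &descriptor.FieldOptions{},
        })
    }

Because these helpers only need a *descriptor.FieldDescriptorProto, the same checks the gogo generator performs are available to any third-party protoc plugin that receives the usual descriptor set.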
+ +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 000000000..1e91766ae --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: 
"varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: 
"varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ 
+ ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), 
+ Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: 
(*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + 
proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 
0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 
0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 
0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000..f6502e4b9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..b80c85653 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 
64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..390d4e4be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func 
IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return 
(v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + 
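For orientation, a minimal sketch (not part of the patch) of how these gogoproto helpers are typically consulted when inspecting a field descriptor; the import paths are the vendored packages above, and the sample field name is hypothetical:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// A bare field descriptor with no options set: each helper falls back to
	// its default (nullable is true, custom type is empty, stdtime is false).
	field := &descriptor.FieldDescriptorProto{Name: proto.String("created_at")}
	fmt.Printf("%s: nullable=%v customtype=%q stdtime=%v\n",
		field.GetName(),
		gogoproto.IsNullable(field),    // true when the (gogoproto.nullable) extension is absent
		gogoproto.GetCustomType(field), // "" when (gogoproto.customtype) is not set
		gogoproto.IsStdTime(field))     // false unless (gogoproto.stdtime) = true
}

NeedsNilCheck above builds on the same predicates, combining IsNullable with the field kind and syntax to decide whether generated code must guard against nil.
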
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index 1ce0be2fa..f85c0cc81 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -318,7 +318,7 @@ func unescape(s string) (ch string, tail string, err error) { if i > utf8.MaxRune { return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) } - return string(i), s, nil + return string(rune(i)), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000..3496dc99d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. 
-I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. 
+func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..18b2a3318 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. 
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + 
return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + 
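As a usage sketch (also outside the patch), ForMessage from the descriptor package above pairs with these generated, nil-checking getters: it recovers the FileDescriptorProto behind any gogo-generated message, and since the descriptor types are themselves generated, descriptor.proto can describe itself.

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Any generated type with a Descriptor() ([]byte, []int) method satisfies
	// descriptor.Message; FileDescriptorProto is one such type.
	fd, md := descriptor.ForMessage(&descriptor.FileDescriptorProto{})
	fmt.Printf("file %q, message %q\n", fd.GetName(), md.GetName())

	// The getters check their receivers and fields for nil, so walking the
	// descriptor tree needs no explicit guards.
	for _, f := range md.GetField() {
		fmt.Printf("  %s: number=%d scalar=%v\n", f.GetName(), f.GetNumber(), f.IsScalar())
	}
}
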
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..165b2110d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..e0846a357 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go 
b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff6420..066b4323b 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go index 4a5931009..47eb3e445 100644 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) { if i > utf8.MaxRune { return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) } - return string(i), s, nil + return string(rune(i)), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff1..85f9f5736 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. 
func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c6..d3c33259d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a4..b2b55dd85 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d491..8368a3f70 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. 
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod deleted file mode 100644 index 8ec4fe9e9..000000000 --- a/vendor/github.com/google/gofuzz/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/google/gofuzz - -go 1.12 diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod deleted file mode 100644 index 9cdfaf447..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/googleapis/gax-go/v2 - -require google.golang.org/grpc v1.19.0 diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum deleted file mode 100644 index 7fa23ecf9..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/go.sum +++ /dev/null @@ -1,25 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod deleted file mode 100644 index 824cb97e8..000000000 --- a/vendor/github.com/hashicorp/golang-lru/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/golang-lru diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml new file mode 100644 index 000000000..8a0681af8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index b13a50ed1..d324c43ba 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,7 +1,12 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls script: + - go test -race -v ./... +after_script: - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index d1cefa871..aa8cbd7ce 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,43 +1,54 @@ # Mergo -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
[![GoDoc][3]][4] -[![GoCard][5]][6] +[![GitHub release][5]][6] +[![GoCard][7]][8] [![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] + +[![GoCenter Kudos][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo +[16]: https://search.gocenter.io/github.com/imdario/mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -### Latest release +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status -[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. 
I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) @@ -86,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) -## Installation +## Install go get github.com/imdario/mergo @@ -98,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -124,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). 
- -### Nice example +Here is a nice example: ```go package main @@ -174,10 +184,10 @@ import ( "time" ) -type timeTransfomer struct { +type timeTransformer struct { } -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { @@ -201,7 +211,7 @@ type Snapshot struct { func main() { src := Snapshot{time.Now()} dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } @@ -217,6 +227,21 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). +## Top Contributors + +[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) +[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) +[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) +[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) +[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) +[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) +[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) +[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) + + ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go index 6e9aa7baf..fcd985f99 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/github.com/imdario/mergo/doc.go @@ -4,41 +4,140 @@ // license that can be found in the LICENSE file. /* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +Important note + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. 
+ +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) Usage -From my own work-in-progress project: +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 + type Foo struct { + A string + B int64 } - type FssnConfig struct { - Network networkConfig + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} } - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, +Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { } - // Inside a function [...] + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... 
+ } - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } - // More code [...] +Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as Go language. */ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 6ea38e636..a13a7ee46 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { + config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) @@ -140,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 706b22069..8c2a8fcd9 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,25 +9,43 @@ package mergo import ( + "fmt" "reflect" ) -func hasExportedField(dst reflect.Value) (exported bool) { +func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { exported = exported || len(field.PkgPath) == 0 } } return } +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool } type Transformers interface { @@ -39,6 +57,10 @@ type Transformers interface { // short circuiting on recursive types. 
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return @@ -66,21 +88,34 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch dst.Kind() { case reflect.Struct: - if hasExportedField(dst) { + if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -90,6 +125,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } continue } fallthrough @@ -124,19 +162,43 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = srcSlice } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -147,19 +209,41 @@ func deepMerge(dst, src 
reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } } case reflect.Ptr: fallthrough case reflect.Interface: - if src.IsNil() { + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } break } + if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } @@ -176,18 +260,31 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } break } + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break } default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } } } + return } @@ -199,7 +296,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { @@ -218,12 +315,37 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithAppendSlice will make merge append slices instead of overwriting it +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. func WithAppendSlice(config *Config) { config.AppendSlice = true } +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). 
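+//
+// A minimal usage sketch (dst and src stand in for the caller's own values):
+//
+//	// overwrite dst attributes with non-empty src values, and return an error
+//	// instead of overriding slices whose types differ
+//	err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithTypeCheck)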
+func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -243,3 +365,16 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index a82fea2fd..3cc926c7f 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,6 +20,7 @@ var ( ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore index 010c242bd..00852bd94 100644 --- a/vendor/github.com/jonboulle/clockwork/.gitignore +++ b/vendor/github.com/jonboulle/clockwork/.gitignore @@ -1,3 +1,5 @@ +/.idea/ + # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml deleted file mode 100644 index aefda90bf..000000000 --- a/vendor/github.com/jonboulle/clockwork/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.3 - -sudo: false diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md index d43a6c799..cad608357 100644 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ b/vendor/github.com/jonboulle/clockwork/README.md @@ -1,61 +1,80 @@ -clockwork -========= +# clockwork -[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork) -[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) +[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#utilities) -a simple fake clock for golang +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/jonboulle/clockwork/CI?style=flat-square)](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI) +[![Go Report Card](https://goreportcard.com/badge/github.com/jonboulle/clockwork?style=flat-square)](https://goreportcard.com/report/github.com/jonboulle/clockwork) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.11-61CFDD.svg?style=flat-square) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/jonboulle/clockwork) -# Usage +**A simple fake clock for Go.** + + +## Usage Replace uses of the `time` package with the `clockwork.Clock` interface instead. For example, instead of using `time.Sleep` directly: -``` -func my_func() { +```go +func myFunc() { time.Sleep(3 * time.Second) - do_something() + doSomething() } ``` -inject a clock and use its `Sleep` method instead: +Inject a clock and use its `Sleep` method instead: -``` -func my_func(clock clockwork.Clock) { +```go +func myFunc(clock clockwork.Clock) { clock.Sleep(3 * time.Second) - do_something() + doSomething() } ``` -Now you can easily test `my_func` with a `FakeClock`: +Now you can easily test `myFunc` with a `FakeClock`: -``` +```go func TestMyFunc(t *testing.T) { c := clockwork.NewFakeClock() // Start our sleepy function - my_func(c) - - // Ensure we wait until my_func is sleeping + var wg sync.WaitGroup + wg.Add(1) + go func() { + myFunc(c) + wg.Done() + }() + + // Ensure we wait until myFunc is sleeping c.BlockUntil(1) - assert_state() + assertState() // Advance the FakeClock forward in time - c.Advance(3) + c.Advance(3 * time.Second) + + // Wait until the function completes + wg.Wait() - assert_state() + assertState() } ``` and in production builds, simply inject the real clock instead: -``` -my_func(clockwork.NewRealClock()) + +```go +myFunc(clockwork.NewRealClock()) ``` See [example_test.go](example_test.go) for a full example. 
+ # Credits -clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) +clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](https://blog.golang.org/playground#TOC_3.1.) + + +## License + +Apache License, Version 2.0. Please see [License File](LICENSE) for more information. diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go index 9ec96ed29..1018051f4 100644 --- a/vendor/github.com/jonboulle/clockwork/clockwork.go +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -11,6 +11,8 @@ type Clock interface { After(d time.Duration) <-chan time.Time Sleep(d time.Duration) Now() time.Time + Since(t time.Time) time.Duration + NewTicker(d time.Duration) Ticker } // FakeClock provides an interface for a clock which can be @@ -60,6 +62,14 @@ func (rc *realClock) Now() time.Time { return time.Now() } +func (rc *realClock) Since(t time.Time) time.Duration { + return rc.Now().Sub(t) +} + +func (rc *realClock) NewTicker(d time.Duration) Ticker { + return &realTicker{time.NewTicker(d)} +} + type fakeClock struct { sleepers []*sleeper blockers []*blocker @@ -87,7 +97,7 @@ func (fc *fakeClock) After(d time.Duration) <-chan time.Time { defer fc.l.Unlock() now := fc.time done := make(chan time.Time, 1) - if d.Nanoseconds() == 0 { + if d.Nanoseconds() <= 0 { // special case - trigger immediately done <- now } else { @@ -130,6 +140,22 @@ func (fc *fakeClock) Now() time.Time { return t } +// Since returns the duration that has passed since the given time on the fakeClock +func (fc *fakeClock) Since(t time.Time) time.Duration { + return fc.Now().Sub(t) +} + +func (fc *fakeClock) NewTicker(d time.Duration) Ticker { + ft := &fakeTicker{ + c: make(chan time.Time, 1), + stop: make(chan bool, 1), + clock: fc, + period: d, + } + ft.runTickThread() + return ft +} + // Advance advances fakeClock to a new point in time, ensuring channels from any // previous invocations of After are notified appropriately before returning func (fc *fakeClock) Advance(d time.Duration) { diff --git a/vendor/github.com/jonboulle/clockwork/ticker.go b/vendor/github.com/jonboulle/clockwork/ticker.go new file mode 100644 index 000000000..32b5d01e7 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/ticker.go @@ -0,0 +1,72 @@ +package clockwork + +import ( + "time" +) + +// Ticker provides an interface which can be used instead of directly +// using the ticker within the time module. The real-time ticker t +// provides ticks through t.C which becomes now t.Chan() to make +// this channel requirement definable in this interface. +type Ticker interface { + Chan() <-chan time.Time + Stop() +} + +type realTicker struct{ *time.Ticker } + +func (rt *realTicker) Chan() <-chan time.Time { + return rt.C +} + +type fakeTicker struct { + c chan time.Time + stop chan bool + clock FakeClock + period time.Duration +} + +func (ft *fakeTicker) Chan() <-chan time.Time { + return ft.c +} + +func (ft *fakeTicker) Stop() { + ft.stop <- true +} + +// runTickThread initializes a background goroutine to send the tick time to the ticker channel +// after every period. Tick events are discarded if the underlying ticker channel does not have +// enough capacity. 
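+//
+// A minimal usage sketch from a test (fc is a FakeClock created elsewhere
+// with NewFakeClock; a tick becomes available once the clock is advanced
+// past a full period):
+//
+//	ticker := fc.NewTicker(time.Second)
+//	fc.Advance(time.Second)
+//	<-ticker.Chan()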
+func (ft *fakeTicker) runTickThread() { + nextTick := ft.clock.Now().Add(ft.period) + next := ft.clock.After(ft.period) + go func() { + for { + select { + case <-ft.stop: + return + case <-next: + // We send the time that the tick was supposed to occur at. + tick := nextTick + // Before sending the tick, we'll compute the next tick time and star the clock.After call. + now := ft.clock.Now() + // First, figure out how many periods there have been between "now" and the time we were + // supposed to have trigged, then advance over all of those. + skipTicks := (now.Sub(tick) + ft.period - 1) / ft.period + nextTick = nextTick.Add(skipTicks * ft.period) + // Now, keep advancing until we are past now. This should happen at most once. + for !nextTick.After(now) { + nextTick = nextTick.Add(ft.period) + } + // Figure out how long between now and the next scheduled tick, then wait that long. + remaining := nextTick.Sub(now) + next = ft.clock.After(remaining) + // Finally, we can actually send the tick. + select { + case ft.c <- tick: + default: + } + } + } + }() +} diff --git a/vendor/github.com/josharian/native/go.mod b/vendor/github.com/josharian/native/go.mod deleted file mode 100644 index 2ea760fd5..000000000 --- a/vendor/github.com/josharian/native/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/josharian/native - -go 1.13 diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod deleted file mode 100644 index e05c42ff5..000000000 --- a/vendor/github.com/json-iterator/go/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/json-iterator/go - -go 1.12 - -require ( - github.com/davecgh/go-spew v1.1.1 - github.com/google/gofuzz v1.0.0 - github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum deleted file mode 100644 index d778b5a14..000000000 --- a/vendor/github.com/json-iterator/go/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter_float.go 
b/vendor/github.com/json-iterator/go/iter_float.go index b9754638e..8a3d8b6fb 100644 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -288,6 +288,9 @@ non_decimal_loop: return iter.readFloat64SlowPath() } value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } } } return iter.readFloat64SlowPath() diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go index 214232035..d786a89fe 100644 --- a/vendor/github.com/json-iterator/go/iter_int.go +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -9,6 +9,7 @@ var intDigits []int8 const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 func init() { intDigits = make([]int8, 256) @@ -339,7 +340,7 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { } func (iter *Iterator) assertInteger() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { iter.ReportError("assertInteger", "can not decode float as int") } } diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 74974ba74..39acb320a 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -65,7 +65,7 @@ func (iter *Iterator) ReadVal(obj interface{}) { decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { + if typ == nil || typ.Kind() != reflect.Ptr { iter.ReportError("ReadVal", "can only unmarshal into pointer") return } diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go index f2619936c..eba434f2f 100644 --- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -33,11 +33,19 @@ type jsonRawMessageCodec struct { } func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } } func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } } func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { @@ -48,11 +56,19 @@ type jsoniterRawMessageCodec struct { } func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } } func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } } func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go 
index d7eb0eb5c..92ae912dc 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -1075,6 +1075,11 @@ type stringModeNumberDecoder struct { } func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + c := iter.nextToken() if c != '"' { iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) diff --git a/vendor/github.com/mdlayher/genetlink/go.mod b/vendor/github.com/mdlayher/genetlink/go.mod deleted file mode 100644 index ee52e4615..000000000 --- a/vendor/github.com/mdlayher/genetlink/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/mdlayher/genetlink - -go 1.13 - -require ( - github.com/google/go-cmp v0.3.1 - github.com/mdlayher/netlink v1.0.0 - golang.org/x/net v0.0.0-20191007182048-72f939374954 - golang.org/x/sys v0.0.0-20191008105621-543471e840be -) diff --git a/vendor/github.com/mdlayher/genetlink/go.sum b/vendor/github.com/mdlayher/genetlink/go.sum deleted file mode 100644 index a8690be9e..000000000 --- a/vendor/github.com/mdlayher/genetlink/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a h1:84IpUNXj4mCR9CuCEvSiCArMbzr/TMbuPIadKDwypkI= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v1.0.0 h1:vySPY5Oxnn/8lxAPn2cK6kAzcZzYJl3KriSLO46OT18= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954 h1:JGZucVF/L/TotR719NbujzadOZ2AgnYlqphQGHDCKaU= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/mdlayher/netlink/go.mod 
b/vendor/github.com/mdlayher/netlink/go.mod deleted file mode 100644 index bc4d4bb58..000000000 --- a/vendor/github.com/mdlayher/netlink/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/mdlayher/netlink - -go 1.12 - -require ( - github.com/google/go-cmp v0.5.6 - github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 - github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190 - github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 - github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 - golang.org/x/net v0.0.0-20210525063256-abc453219eb5 - golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea -) diff --git a/vendor/github.com/mdlayher/netlink/go.sum b/vendor/github.com/mdlayher/netlink/go.sum deleted file mode 100644 index 5bcf1471f..000000000 --- a/vendor/github.com/mdlayher/netlink/go.sum +++ /dev/null @@ -1,87 +0,0 @@ -github.com/cilium/ebpf v0.5.0 h1:E1KshmrMEtkMP2UjlWzfmUV1owWY+BnbL5FxxuatnrU= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 h1:uhL5Gw7BINiiPAo24A2sxkcDI0Jt/sqp1v5xQCniEFA= -github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= -github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= -github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= -github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= -github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= -github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190 h1:iycCSDo8EKVueI9sfVBBJmtNn9DnXV/K1YWwEJO+uOs= -github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= -github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= -github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= -github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= -github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= -github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= -github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= -github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= -github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 h1:qEtkL8n1DAHpi5/AOgAckwGQUlMe4+jhL/GMt+GKIks= -github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea h1:+WiDlPBBaO+h9vPNZi8uJ3k4BkKQB7Iow3aqwHVA5hI= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/mdlayher/socket/go.mod b/vendor/github.com/mdlayher/socket/go.mod deleted file mode 100644 index f5636dacc..000000000 --- a/vendor/github.com/mdlayher/socket/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/mdlayher/socket - -go 1.16 - -require ( - github.com/google/go-cmp v0.5.5 - golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 - golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b -) diff --git a/vendor/github.com/mdlayher/socket/go.sum b/vendor/github.com/mdlayher/socket/go.sum deleted file mode 100644 index b03d4105a..000000000 --- a/vendor/github.com/mdlayher/socket/go.sum +++ /dev/null @@ -1,12 +0,0 @@ -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b h1:ggRgirZABFolTmi3sn6Ivd9SipZwLedQ5wR0aAKnFxU= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore index 66be63a00..1fb13abeb 100644 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -1 +1,4 @@ logrus +vendor + +.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 000000000..65dc28503 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index 2f19b4a75..c1dbd5a3a 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -1,13 +1,15 @@ language: go -go: - - 1.9.x - - 1.10.x +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 env: - - GOMAXPROCS=4 GORACE=halt_on_error=1 + - GO111MODULE=on +go: 1.15.x +os: linux install: - - go get github.com/stretchr/testify/assert - - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows + - ./travis/install.sh script: - - go test -race -v ./... 
+ - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 1bd1deb29..7567f6128 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,139 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long time race condiction with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). 
+ + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. + * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + # 1.0.5 * Fix hooks race (#707) diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 072e99be3..5152b6aa4 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,8 +1,28 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. 
Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + **Seeing weird case-sensitive problems?** It's in the past been possible to import Logrus as both upper- and lower-case. Due to the Go package environment, this caused issues in the community and we needed a standard. Some environments @@ -15,11 +35,6 @@ comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). For an in-depth explanation of the casing issue, see [this comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). -**Are you interested in assisting in maintaining Logrus?** Currently I have a -lot of obligations, and I am unable to provide Logrus with the maintainership it -needs. If you'd like to help, please reach out to me at `simon at author's -username dot com`. - Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -56,8 +71,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 ``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + #### Case-sensitivity @@ -156,7 +202,7 @@ func main() { log.Out = os.Stdout // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { // log.Out = file // } else { @@ -241,14 +287,15 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). 
For the detail, please check the [syslog hook README](hooks/syslog/README.md). -A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) #### Level logging -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go +log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") @@ -322,6 +369,7 @@ The built-in logging formatters are: [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). * When colors are enabled, levels are truncated to 4 characters by default. To disable truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). @@ -329,9 +377,13 @@ The built-in logging formatters are: Third party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -350,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // source of the official loggers. serialized, err := json.Marshal(entry.Data) if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) } return append(serialized, '\n'), nil } @@ -396,14 +448,14 @@ entries. It should not be a feature of the application-level logger. 
| Tool | Description | | ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| |[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go @@ -431,7 +483,7 @@ func TestSomething(t*testing.T){ Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. ``` @@ -456,6 +508,6 @@ Situation when locking is not needed includes: 1) logger.Out is protected by locks. - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go index 8af90637a..8fd189e1c 100644 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -51,9 +51,9 @@ func Exit(code int) { os.Exit(code) } -// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke -// all handlers. The handlers will also be invoked when any Fatal log entry is -// made. +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. // // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be @@ -62,3 +62,15 @@ func Exit(code int) { func RegisterExitHandler(handler func()) { handlers = append(handlers, handler) } + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. 
The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml index 96c2ce15f..df9d65c3a 100644 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -1,14 +1,14 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 000000000..4545dec07 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,52 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get() +} + +func putBuffer(buf *bytes.Buffer) { + buf.Reset() + bufferPool.Put(buf) +} + +// SetBufferPool allows to replace the default logrus buffer pool +// to better meets the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 473bd1a0d..07a1e5fa7 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -2,29 +2,45 @@ package logrus import ( "bytes" + "context" "fmt" "os" + "reflect" + "runtime" + "strings" "sync" "time" ) -var bufferPool *sync.Pool +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) func init() { - bufferPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - } + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 } // Defines the key when adding errors using WithError. var ErrorKey = "error" // An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. 
These objects can be reused and -// passed around as much as you wish to avoid field duplication. +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger @@ -34,29 +50,51 @@ type Entry struct { // Time at which the log entry was created Time time.Time - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level - // Message passed to Debug, Info, Warn, Error, Fatal or Panic + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic Message string - // When formatter is called in entry.log(), an Buffer may be set to entry + // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is five fields, give a little extra room - Data: make(Fields, 5), + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), } } +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + // Returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { - serialized, err := entry.Logger.Formatter.Format(entry) + serialized, err := entry.Bytes() if err != nil { return "", err } @@ -69,6 +107,15 @@ func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + // Add a single field to the Entry. 
func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) @@ -80,57 +127,149 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range entry.Data { data[k] = v } + fieldErr := entry.err for k, v := range fields { - data[k] = v + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} } // Overrides the time of the Entry. func (entry *Entry) WithTime(t time.Time) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { +func (entry *Entry) log(level Level, msg string) { var buffer *bytes.Buffer - // Default to now, but allow users to override if they want. - // - // We don't have to worry about polluting future calls to Entry#log() - // with this assignment because this function is declared with a - // non-pointer receiver. 
- if entry.Time.IsZero() { - entry.Time = time.Now() + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() } - entry.Level = level - entry.Message = msg + newEntry.Level = level + newEntry.Message = msg - entry.fireHooks() + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + newEntry.Logger.mu.Unlock() - buffer = bufferPool.Get().(*bytes.Buffer) + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + + buffer = getBuffer() + defer func() { + newEntry.Buffer = nil + putBuffer(buffer) + }() buffer.Reset() - defer bufferPool.Put(buffer) - entry.Buffer = buffer + newEntry.Buffer = buffer - entry.write() + newEntry.write() - entry.Buffer = nil + newEntry.Buffer = nil // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. if level <= PanicLevel { - panic(&entry) + panic(newEntry) } } func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, entry) + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } @@ -138,38 +277,41 @@ func (entry *Entry) fireHooks() { func (entry *Entry) write() { serialized, err := entry.Logger.Formatter.Format(entry) - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } else { - _, err = entry.Logger.Out.Write(serialized) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } + return + } + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) } } +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + func (entry *Entry) Print(args ...interface{}) { entry.Info(args...) } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } + entry.Log(InfoLevel, args...) } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } + entry.Log(WarnLevel, args...) } func (entry *Entry) Warning(args ...interface{}) { @@ -177,37 +319,36 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } + entry.Log(ErrorLevel, args...) } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - Exit(1) + entry.Log(FatalLevel, args...) 
+ entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) + entry.Log(PanicLevel, args...) } // Entry Printf family functions -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) } } +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } + entry.Logf(InfoLevel, format, args...) } func (entry *Entry) Printf(format string, args ...interface{}) { @@ -215,9 +356,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } + entry.Logf(WarnLevel, format, args...) } func (entry *Entry) Warningf(format string, args ...interface{}) { @@ -225,36 +364,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } + entry.Logf(ErrorLevel, format, args...) } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - Exit(1) + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } + entry.Logf(PanicLevel, format, args...) } // Entry Println family functions -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) } } +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } + entry.Logln(InfoLevel, args...) } func (entry *Entry) Println(args ...interface{}) { @@ -262,9 +401,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } + entry.Logln(WarnLevel, args...) } func (entry *Entry) Warningln(args ...interface{}) { @@ -272,22 +409,16 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } + entry.Logln(ErrorLevel, args...) 
} func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - Exit(1) + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } + entry.Logln(PanicLevel, args...) } // Sprintlnn => Sprint no newline. This is to get the behavior of how diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index eb612a6f3..017c30ce6 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -1,6 +1,7 @@ package logrus import ( + "context" "io" "time" ) @@ -21,30 +22,33 @@ func SetOutput(out io.Writer) { // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) } // SetLevel sets the standard logger level. func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.level() + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) + std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. @@ -52,6 +56,11 @@ func WithError(err error) *Entry { return std.WithField(ErrorKey, err) } +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // @@ -71,7 +80,7 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } -// WithTime creats an entry from the standard logger and overrides the time of +// WithTime creates an entry from the standard logger and overrides the time of // logs generated with it. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal @@ -80,6 +89,11 @@ func WithTime(t time.Time) *Entry { return std.WithTime(t) } +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -120,6 +134,56 @@ func Fatal(args ...interface{}) { std.Fatal(args...) } +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. 
+func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) @@ -160,6 +224,11 @@ func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go index 83c74947b..408883773 100644 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -2,7 +2,16 @@ package logrus import "time" -const defaultTimestampFormat = time.RFC3339 +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: @@ -18,7 +27,7 @@ type Formatter interface { Format(*Entry) ([]byte, error) } -// This is to not silently overwrite `time`, `msg` and `level` fields when +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") @@ -30,7 +39,7 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap) { +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { timeKey := fieldMap.resolve(FieldKeyTime) if t, ok := data[timeKey]; ok { data["fields."+timeKey] = t @@ -48,4 +57,22 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) { data["fields."+levelKey] = l delete(data, levelKey) } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. 
+ if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } } diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index dab17610f..c96dc5636 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -1,8 +1,10 @@ package logrus import ( + "bytes" "encoding/json" "fmt" + "runtime" ) type fieldKey string @@ -10,13 +12,6 @@ type fieldKey string // FieldMap allows customization of the key names for default fields. type FieldMap map[fieldKey]string -// Default key names for the default fields -const ( - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" -) - func (f FieldMap) resolve(key fieldKey) string { if k, ok := f[key]; ok { return k @@ -28,11 +23,17 @@ func (f FieldMap) resolve(key fieldKey) string { // JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. DataKey string @@ -40,17 +41,27 @@ type JSONFormatter struct { // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", + // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", // }, // } FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool } // Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) + data := make(Fields, len(entry.Data)+4) for k, v := range entry.Data { switch v := v.(type) { case error: @@ -68,22 +79,50 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data = newData } - prefixFieldClashes(data, f.FieldMap) + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } if !f.DisableTimestamp { data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) } data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} } - return append(serialized, '\n'), nil + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil } diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 342f7977d..337704457 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -1,6 +1,7 @@ package logrus import ( + "context" "io" "os" "sync" @@ -8,10 +9,15 @@ import ( "time" ) +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enables rather than +// generating the log message and then checking if the level is enabled +type LogFunction func() []interface{} + type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. + // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking @@ -24,6 +30,10 @@ type Logger struct { // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. 
@@ -32,8 +42,12 @@ type Logger struct { mu MutexWrap // Reusable empty entry entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc } +type exitFunc func(int) + type MutexWrap struct { lock sync.Mutex disabled bool @@ -59,20 +73,22 @@ func (mw *MutexWrap) Disable() { // `Out` and `Hooks` directly on the default logger instance. You can also just // instantiate your own: // -// var log = &Logger{ +// var log = &logrus.Logger{ // Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), // Level: logrus.DebugLevel, // } // // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, } } @@ -85,11 +101,13 @@ func (logger *Logger) newEntry() *Entry { } func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } -// Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() @@ -113,6 +131,13 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + // Overrides the time of the log entry. func (logger *Logger) WithTime(t time.Time) *Entry { entry := logger.newEntry() @@ -120,20 +145,24 @@ func (logger *Logger) WithTime(t time.Time) *Entry { return entry.WithTime(t) } -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.level() >= DebugLevel { +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Debugf(format, args...) + entry.Logf(level, format, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infof(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(InfoLevel, format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -143,123 +172,138 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(WarnLevel, format, args...) 
} func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalf(format, args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panicf(format, args...) + entry.Log(level, args...) logger.releaseEntry(entry) } } -func (logger *Logger) Debug(args ...interface{}) { - if logger.level() >= DebugLevel { +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Debug(args...) + entry.Log(level, fn()...) logger.releaseEntry(entry) } } +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + func (logger *Logger) Info(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Info(args...) - logger.releaseEntry(entry) - } + logger.Log(InfoLevel, args...) } func (logger *Logger) Print(args ...interface{}) { entry := logger.newEntry() - entry.Info(args...) + entry.Print(args...) logger.releaseEntry(entry) } func (logger *Logger) Warn(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Error(args...) - logger.releaseEntry(entry) - } + logger.Log(ErrorLevel, args...) } func (logger *Logger) Fatal(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatal(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Log(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) 
+ logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panic(args...) + entry.Logln(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + func (logger *Logger) Debugln(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debugln(args...) - logger.releaseEntry(entry) - } + logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infoln(args...) - logger.releaseEntry(entry) - } + logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { @@ -269,44 +313,32 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Logln(WarnLevel, args...) } func (logger *Logger) Warningln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorln(args...) - logger.releaseEntry(entry) - } + logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalln(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logln(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { - if logger.level() >= PanicLevel { - entry := logger.newEntry() - entry.Panicln(args...) - logger.releaseEntry(entry) + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit } + logger.ExitFunc(code) } //When file is opened with appending mode, it's safe to @@ -320,18 +352,53 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } +// SetLevel sets the logger level. func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } -func (logger *Logger) SetOutput(out io.Writer) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Out = out +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() } +// AddHook adds a hook to the logger hooks. func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() logger.Hooks.Add(hook) } + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. 
+func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index dd3899974..2f16224cb 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -14,22 +14,11 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" } - - return "unknown" } // ParseLevel takes a string level and returns the Logrus log level constant. @@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) { return InfoLevel, nil case "debug": return DebugLevel, nil + case "trace": + return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, @@ -61,6 +85,7 @@ var AllLevels = []Level{ WarnLevel, InfoLevel, DebugLevel, + TraceLevel, } // These are the different logging levels. You can set the logging level to log @@ -69,7 +94,7 @@ const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. @@ -82,6 +107,8 @@ const ( InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. 
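Level now implements encoding.TextMarshaler and TextUnmarshaler, and ParseLevel accepts "trace", so levels can round-trip through plain text such as flags or config files. A small sketch of that round-trip; apart from the import path, everything used here appears in the hunk above:

    package main

    import (
        "fmt"

        "github.com/sirupsen/logrus"
    )

    func main() {
        // ParseLevel understands the new "trace" name alongside the existing ones.
        lvl, err := logrus.ParseLevel("trace")
        if err != nil {
            panic(err)
        }
        fmt.Println(lvl) // "trace": String() now delegates to MarshalText

        // UnmarshalText lets a Level be decoded straight from text-based config.
        var fromConfig logrus.Level
        if err := fromConfig.UnmarshalText([]byte("debug")); err != nil {
            panic(err)
        }
        fmt.Println(fromConfig == logrus.DebugLevel) // true
    }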
+ TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger @@ -140,4 +167,20 @@ type FieldLogger interface { Errorln(args ...interface{}) Fatalln(args ...interface{}) Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) } diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 4880d13d2..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine,!gopherjs - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go index 3de08e802..2403de981 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -1,4 +1,4 @@ -// +build appengine gopherjs +// +build appengine package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 000000000..499789984 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..ebdae3ec6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 000000000..97af92c68 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go index 067047a12..3293fb3ca 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -1,18 +1,16 @@ -// +build !appengine,!gopherjs +// +build !appengine,!js,!windows,!nacl,!plan9 package logrus import ( "io" "os" - - "golang.org/x/crypto/ssh/terminal" ) func checkIfTerminal(w io.Writer) bool { switch v := w.(type) { case *os.File: - return terminal.IsTerminal(int(v.Fd())) + return isTerminal(int(v.Fd())) default: return false } diff --git 
a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 000000000..f6710b3bd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 000000000..04748b851 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix zos +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 000000000..2879eb50e --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,27 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/windows" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + handle := windows.Handle(v.Fd()) + var mode uint32 + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true + } + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go deleted file mode 100644 index f29a0097c..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine,!gopherjs - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index 3e5504030..be2c6efe5 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -3,25 +3,24 @@ package logrus import ( "bytes" "fmt" + "os" + "runtime" "sort" + "strconv" "strings" "sync" "time" + "unicode/utf8" ) const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 36 - gray = 37 + red = 31 + yellow = 33 + blue = 36 + gray = 37 ) -var ( - baseTimestamp time.Time - emptyFieldMap FieldMap -) +var baseTimestamp time.Time func init() { baseTimestamp = time.Now() @@ -35,6 +34,17 @@ type TextFormatter struct { // Force disabling colors. DisableColors bool + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. 
- https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + // Disable timestamp logging. useful when output is redirected to logging // system that already adds timestamps. DisableTimestamp bool @@ -43,7 +53,10 @@ type TextFormatter struct { // the time passed since beginning of execution. FullTimestamp bool - // TimestampFormat to use for display when a full timestamp is printed + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // The fields are sorted by default for a consistent output. For applications @@ -51,9 +64,16 @@ type TextFormatter struct { // be desired. DisableSorting bool + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + // Disables the truncation of the level text to 4 characters. DisableLevelTruncation bool + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool @@ -69,26 +89,101 @@ type TextFormatter struct { // FieldKeyMsg: "@message"}} FieldMap FieldMap - sync.Once + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors } // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - prefixFieldClashes(entry.Data, f.FieldMap) - - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { keys = append(keys, k) } + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal 
= f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + if !f.DisableSorting { - sort.Strings(keys) + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) } var b *bytes.Buffer @@ -98,26 +193,35 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { b = &bytes.Buffer{} } - f.Do(func() { f.init(entry) }) - - isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + f.terminalInitOnce.Do(func() { f.init(entry) }) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } - if isColored { - f.printColored(b, entry, keys, timestampFormat) + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) } } @@ -125,42 +229,82 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { var levelColor int switch entry.Level { - case DebugLevel: + case DebugLevel, TraceLevel: levelColor = gray case WarnLevel: levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: levelColor = red + case InfoLevel: + levelColor = blue default: levelColor = blue } levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation { + if !f.DisableLevelTruncation && !f.PadLevelText { levelText = levelText[0:4] } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. 
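The new TextFormatter fields in this diff (ForceQuote, DisableQuote, EnvironmentOverrideColors, PadLevelText, SortingFunc, CallerPrettyfier) are plain configuration knobs. A hedged configuration sketch using only fields declared above; any behaviour beyond what the hunk shows is assumed from upstream logrus:

    package main

    import (
        "os"
        "sort"

        "github.com/sirupsen/logrus"
    )

    func main() {
        logger := logrus.New() // assumed from upstream, as before
        logger.SetFormatter(&logrus.TextFormatter{
            FullTimestamp:             true,
            PadLevelText:              true, // pad "INFO" etc. to the longest level name
            ForceQuote:                true, // quote every value, not only those that need it
            EnvironmentOverrideColors: true, // let CLICOLOR / CLICOLOR_FORCE decide colouring
            SortingFunc:               sort.Strings,
        })

        // With EnvironmentOverrideColors set, CLICOLOR_FORCE=1 forces colour even when
        // the output is not a terminal (see isColored above); CLICOLOR=0 disables it.
        os.Setenv("CLICOLOR_FORCE", "1")

        logger.Info("hello")
    }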
+ formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } - if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) - } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { - v := entry.Data[k] + v := data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) f.appendValue(b, v) } } func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } if f.QuoteEmptyFields && len(text) == 0 { return true } + if f.DisableQuote { + return false + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 7bdebedc6..72e8e3a1b 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -6,10 +6,16 @@ import ( "runtime" ) +// Writer at INFO level. See WriterLevel for details. func (logger *Logger) Writer() *io.PipeWriter { return logger.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. 
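The WriterLevel comment above spells out the intended pattern: hand the returned *io.PipeWriter to anything that expects an io.Writer, most commonly the standard library logger, and close it when done. A minimal sketch of that redirect (logrus.New again assumed from upstream):

    package main

    import (
        "log"

        "github.com/sirupsen/logrus"
    )

    func main() {
        logger := logrus.New()

        // Every line the stdlib logger writes is re-emitted by logrus at WarnLevel,
        // passing through the usual formatters and hooks; closing the writer is the
        // caller's responsibility, per the comment above.
        w := logger.WriterLevel(logrus.WarnLevel)
        defer w.Close()

        log.SetOutput(w)
        log.Println("redirected through logrus")
    }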
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } @@ -24,6 +30,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { var printFunc func(args ...interface{}) switch level { + case TraceLevel: + printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod deleted file mode 100644 index b2287eec1..000000000 --- a/vendor/github.com/spf13/pflag/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/pflag - -go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go deleted file mode 100644 index 209f9ebad..000000000 --- a/vendor/github.com/ugorji/go/codec/0doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package understands the 'unsafe' tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. - -To install using unsafe, pass the 'unsafe' tag: - - go get -tags=unsafe github.com/ugorji/go/codec - -For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into a interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. 
json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. - - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. - -The Encoder and Decoder are NOT safe for concurrent use. - -Consequently, the usage model is basically: - - - Create and initialize the Handle before any use. - Once created, DO NOT modify it. - - Multiple Encoders or Decoders can now use the Handle concurrently. - They only read information off the Handle (never write). - - However, each Encoder or Decoder MUST not be used concurrently - - To re-use an Encoder/Decoder, call Reset(...) on it first. - This allows you use state maintained on the Encoder/Decoder. - -Sample usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. 
for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -*/ -package codec - -// Benefits of go-codec: -// -// - encoding/json always reads whole file into memory first. -// This makes it unsuitable for parsing very large files. -// - encoding/xml cannot parse into a map[string]interface{} -// I found this out on reading https://github.com/clbanning/mxj - -// TODO: -// -// - optimization for codecgen: -// if len of entity is <= 3 words, then support a value receiver for encode. -// - (En|De)coder should store an error when it occurs. -// Until reset, subsequent calls return that error that was stored. -// This means that free panics must go away. -// All errors must be raised through errorf method. -// - Decoding using a chan is good, but incurs concurrency costs. -// This is because there's no fast way to use a channel without it -// having to switch goroutines constantly. -// Callback pattern is still the best. Maybe consider supporting something like: -// type X struct { -// Name string -// Ys []Y -// Ys chan <- Y -// Ys func(Y) -> call this function for each entry -// } -// - Consider adding a isZeroer interface { isZero() bool } -// It is used within isEmpty, for omitEmpty support. -// - Consider making Handle used AS-IS within the encoding/decoding session. -// This means that we don't cache Handle information within the (En|De)coder, -// except we really need it at Reset(...) -// - Consider adding math/big support -// - Consider reducing the size of the generated functions: -// Maybe use one loop, and put the conditionals in the loop. -// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md deleted file mode 100644 index 91cb3a27b..000000000 --- a/vendor/github.com/ugorji/go/codec/README.md +++ /dev/null @@ -1,148 +0,0 @@ -# Codec - -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package understands the `unsafe` tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. 
- -To use it, you must pass the `unsafe` tag during install: - -``` -go install -tags=unsafe github.com/ugorji/go/codec -``` - -Online documentation: http://godoc.org/github.com/ugorji/go/codec -Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into a interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. - - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. 
- -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go deleted file mode 100644 index 33120dcb6..000000000 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ /dev/null @@ -1,929 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" - "time" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - e *Encoder - w encWriter - m map[string]uint16 // symbols - b [scratchByteArrayLen]byte - s uint16 // symbols sequencer - encNoSeparator -} - -func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { - if rt == timeTypId { - var bs []byte - switch x := v.(type) { - case time.Time: - bs = encodeTime(x) - case *time.Time: - bs = encodeTime(*x) - default: - e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) - } - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) EncodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) EncodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) EncodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | 
bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:8], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:8]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) EncodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - if v >= 0 { - e.encUint(bincVdPosInt<<4, true, uint64(v)) - } else if v == -1 { - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - } else { - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) EncodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - if v == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - } else if pos && v >= 1 && v <= 16 { - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - } else if v <= math.MaxUint8 { - e.w.writen2(bd|0x0, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.encIntegerPrune(bd, pos, v, 4) - } else { - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) EncodeArrayStart(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeMapStart(length int) { - e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). 
- - l := len(v) - if l == 0 { - e.encBytesLen(c_UTF8, 0) - return - } else if l == 1 { - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - } else { - e.s++ - ui = e.s - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - if l <= math.MaxUint8 { - // lenprec = 0 - } else if l <= math.MaxUint16 { - lenprec = 1 - } else if int64(l) <= math.MaxUint32 { - lenprec = 2 - } else { - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - if lenprec == 0 { - e.w.writen1(byte(l)) - } else if lenprec == 1 { - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) - } else if lenprec == 2 { - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) - } else { - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - if v <= math.MaxUint8 { - e.w.writen2(bd, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd | 0x02) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { - e.w.writen1(bd | 0x03) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecSymbol struct { - s string - b []byte - i uint16 -} - -type bincDecDriver struct { - d *Decoder - h *BincHandle - r decReader - br bool // bytes reader - bdRead bool - bd byte - vd byte - vs byte - noStreamingCodec - decNoSeparator - b [scratchByteArrayLen]byte - - // linear searching on this slice is ok, - // because we typically expect < 32 symbols in each stream. 
- s []bincDecSymbol -} - -func (d *bincDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true -} - -func (d *bincDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *bincDecDriver) ContainerType() (vt valueType) { - if d.vd == bincVdSpecial && d.vs == bincSpNil { - return valueTypeNil - } else if d.vd == bincVdByteArray { - return valueTypeBytes - } else if d.vd == bincVdString { - return valueTypeString - } else if d.vd == bincVdArray { - return valueTypeArray - } else if d.vd == bincVdMap { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *bincDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { - if !d.bdRead { - d.readNextBd() - } - if rt == timeTypId { - if d.vd != bincVdTimestamp { - d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - return - } - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) - return - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } - if x := d.vs & 0x7; x == bincFlBin32 { - d.decFloatPre(d.vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - } else if x == bincFlBin64 { - d.decFloatPre(d.vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - } else { - d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - return - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 3: - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:8]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:8])) - case 7: - d.r.readb(d.b[:8]) - v = uint64(bigen.Uint64(d.b[:8])) - default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") - return - } - return -} - -func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdPosInt { - ui = d.decUint() - } else if vd == bincVdNegInt { - ui = d.decUint() - neg = true - } else if vd == bincVdSmallInt { - ui = uint64(d.vs) + 1 - } else if vd == bincVdSpecial { - if vs == bincSpZero { - //i = 0 - } else if vs == bincSpNegOne { - neg = true - ui = 1 - } else { - d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) - return - } - } else { - d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - return - } - return -} - -func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("binc: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("binc: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdSpecial { - d.bdRead = false - if vs == bincSpNan { - return math.NaN() - } else if vs == bincSpPosInf { - return math.Inf(1) - } else if vs == bincSpZeroFloat || vs == bincSpZero { - return - } else if vs == bincSpNegInf { - return math.Inf(-1) - } else { - d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - return - } - } else if vd == bincVdFloat { - f = d.decFloat() - } else { - f = float64(d.DecodeInt(64)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("binc: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *bincDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { - // b = false - } else if bd == (bincVdSpecial | bincSpTrue) { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadMapStart() (length int) { - if d.vd != bincVdMap { - d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadArrayStart() (length int) { - if d.vd != bincVdArray { - d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs > 3 { - return int(d.vs - 4) - } - return int(d.decLenNumber()) -} - -func (d *bincDecDriver) decLenNumber() (v uint64) { - if x := d.vs; x == 0 { - v = uint64(d.r.readn1()) - } else if x == 1 { - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - } else if x == 2 { - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - } else { - d.r.readb(d.b[:8]) - v = bigen.Uint64(d.b[:8]) - } - return -} - -func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return - } - var slen int = -1 - // var ok bool - switch d.vd { - case bincVdString, bincVdByteArray: - slen = d.decLen() - if zerocopy { - if d.br { - bs2 = d.r.readx(slen) - } else if len(bs) == 0 { - bs2 = decByteSlice(d.r, slen, d.b[:]) - } else { - bs2 = decByteSlice(d.r, slen, bs) - } - } else { - bs2 = decByteSlice(d.r, slen, bs) - } - if withString { - s = string(bs2) - } - case bincVdSymbol: - // zerocopy doesn't apply for symbols, - // as the values must be stored in a table for later use. - // - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint16 - vs := d.vs - if vs&0x8 == 0 { - symbol = uint16(d.r.readn1()) - } else { - symbol = uint16(bigen.Uint16(d.r.readx(2))) - } - if d.s == nil { - d.s = make([]bincDecSymbol, 0, 16) - } - - if vs&0x4 == 0 { - for i := range d.s { - j := &d.s[i] - if j.i == symbol { - bs2 = j.b - if withString { - if j.s == "" && bs2 != nil { - j.s = string(bs2) - } - s = j.s - } - break - } - } - } else { - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(bigen.Uint16(d.r.readx(2))) - case 2: - slen = int(bigen.Uint32(d.r.readx(4))) - case 3: - slen = int(bigen.Uint64(d.r.readx(8))) - } - // since using symbols, do not store any part of - // the parameter bs in the map, as it might be a shared buffer. - // bs2 = decByteSlice(d.r, slen, bs) - bs2 = decByteSlice(d.r, slen, nil) - if withString { - s = string(bs2) - } - d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) - } - default: - d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accommodate symbols, whose impl stores string version in map. - // Use decStringAndBytes directly. - // return string(d.DecodeBytes(d.b[:], true, true)) - _, s = d.decStringAndBytes(d.b[:], true, true) - return -} - -func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if isstring { - bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) - return - } - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return nil - } - var clen int - if d.vd == bincVdString || d.vd == bincVdByteArray { - clen = d.decLen() - } else { - d.d.errorf("Invalid d.vd for bytes. 
Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - return - } - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdCustomExt { - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil, false, true) - } else { - d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - n.v = valueTypeNil - case bincSpFalse: - n.v = valueTypeBool - n.b = false - case bincSpTrue: - n.v = valueTypeBool - n.b = true - case bincSpNan: - n.v = valueTypeFloat - n.f = math.NaN() - case bincSpPosInf: - n.v = valueTypeFloat - n.f = math.Inf(1) - case bincSpNegInf: - n.v = valueTypeFloat - n.f = math.Inf(-1) - case bincSpZeroFloat: - n.v = valueTypeFloat - n.f = float64(0) - case bincSpZero: - n.v = valueTypeUint - n.u = uint64(0) // int8(0) - case bincSpNegOne: - n.v = valueTypeInt - n.i = int64(-1) // int8(-1) - default: - d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - n.v = valueTypeUint - n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - n.v = valueTypeUint - n.u = d.decUint() - case bincVdNegInt: - n.v = valueTypeInt - n.i = -(int64(d.decUint())) - case bincVdFloat: - n.v = valueTypeFloat - n.f = d.decFloat() - case bincVdSymbol: - n.v = valueTypeSymbol - n.s = d.DecodeString() - case bincVdString: - n.v = valueTypeString - n.s = d.DecodeString() - case bincVdByteArray: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case bincVdTimestamp: - n.v = valueTypeTimestamp - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - n.t = tt - case bincVdCustomExt: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case bincVdArray: - n.v = valueTypeArray - decodeFurther = true - case bincVdMap: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - -//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. 
-// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -// -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. -type BincHandle struct { - BasicHandle - binaryEncodingType -} - -func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *BincHandle) newEncDriver(e *Encoder) encDriver { - return &bincEncDriver{e: e, w: e.w} -} - -func (h *BincHandle) newDecDriver(d *Decoder) decDriver { - return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *bincEncDriver) reset() { - e.w = e.e.w - e.s = 0 - e.m = nil -} - -func (d *bincDecDriver) reset() { - d.r = d.d.r - d.s = nil - d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 -} - -var _ decDriver = (*bincDecDriver)(nil) -var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go deleted file mode 100644 index 4fa349ac8..000000000 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" -) - -const ( - cborMajorUint byte = iota - cborMajorNegInt - cborMajorBytes - cborMajorText - cborMajorArray - cborMajorMap - cborMajorTag - cborMajorOther -) - -const ( - cborBdFalse byte = 0xf4 + iota - cborBdTrue - cborBdNil - cborBdUndefined - cborBdExt - cborBdFloat16 - cborBdFloat32 - cborBdFloat64 -) - -const ( - cborBdIndefiniteBytes byte = 0x5f - cborBdIndefiniteString = 0x7f - cborBdIndefiniteArray = 0x9f - cborBdIndefiniteMap = 0xbf - cborBdBreak = 0xff -) - -const ( - CborStreamBytes byte = 0x5f - CborStreamString = 0x7f - CborStreamArray = 0x9f - CborStreamMap = 0xbf - CborStreamBreak = 0xff -) - -const ( - cborBaseUint byte = 0x00 - cborBaseNegInt = 0x20 - cborBaseBytes = 0x40 - cborBaseString = 0x60 - cborBaseArray = 0x80 - cborBaseMap = 0xa0 - cborBaseTag = 0xc0 - cborBaseSimple = 0xe0 -) - -// ------------------- - -type cborEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - w encWriter - h *CborHandle - x [8]byte -} - -func (e *cborEncDriver) EncodeNil() { - e.w.writen1(cborBdNil) -} - -func (e *cborEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(cborBdTrue) - } else { - e.w.writen1(cborBdFalse) - } -} - -func (e *cborEncDriver) EncodeFloat32(f float32) { - e.w.writen1(cborBdFloat32) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *cborEncDriver) EncodeFloat64(f float64) { - e.w.writen1(cborBdFloat64) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *cborEncDriver) encUint(v uint64, bd byte) { - if v <= 0x17 { - e.w.writen1(byte(v) + bd) - } else if v <= math.MaxUint8 { - e.w.writen2(bd+0x18, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 0x19) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 0x1a) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 0x1b) - bigenHelper{e.x[:8], e.w}.writeUint64(v) - } -} - -func (e *cborEncDriver) EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-1-v), 
cborBaseNegInt) - } else { - e.encUint(uint64(v), cborBaseUint) - } -} - -func (e *cborEncDriver) EncodeUint(v uint64) { - e.encUint(v, cborBaseUint) -} - -func (e *cborEncDriver) encLen(bd byte, length int) { - e.encUint(uint64(length), bd) -} - -func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - e.encUint(uint64(xtag), cborBaseTag) - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - e.encUint(uint64(re.Tag), cborBaseTag) - if re.Data != nil { - en.encode(re.Data) - } else if re.Value == nil { - e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *cborEncDriver) EncodeArrayStart(length int) { - e.encLen(cborBaseArray, length) -} - -func (e *cborEncDriver) EncodeMapStart(length int) { - e.encLen(cborBaseMap, length) -} - -func (e *cborEncDriver) EncodeString(c charEncoding, v string) { - e.encLen(cborBaseString, len(v)) - e.w.writestr(v) -} - -func (e *cborEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - if c == c_RAW { - e.encLen(cborBaseBytes, len(v)) - } else { - e.encLen(cborBaseString, len(v)) - } - e.w.writeb(v) -} - -// ---------------------- - -type cborDecDriver struct { - d *Decoder - h *CborHandle - r decReader - b [scratchByteArrayLen]byte - br bool // bytes reader - bdRead bool - bd byte - noBuiltInTypes - decNoSeparator -} - -func (d *cborDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *cborDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *cborDecDriver) ContainerType() (vt valueType) { - if d.bd == cborBdNil { - return valueTypeNil - } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { - return valueTypeBytes - } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { - return valueTypeString - } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { - return valueTypeArray - } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *cborDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - // treat Nil and Undefined as nil values - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) CheckBreak() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdBreak { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) decUint() (ui uint64) { - v := d.bd & 0x1f - if v <= 0x17 { - ui = uint64(v) - } else { - if v == 0x18 { - ui = uint64(d.r.readn1()) - } else if v == 0x19 { - ui = uint64(bigen.Uint16(d.r.readx(2))) - } else if v == 0x1a { - ui = uint64(bigen.Uint32(d.r.readx(4))) - } else if v == 0x1b { - ui = uint64(bigen.Uint64(d.r.readx(8))) - } else { - d.d.errorf("decUint: Invalid descriptor: %v", d.bd) - return - } - } - return -} - -func (d *cborDecDriver) decCheckInteger() (neg bool) { - if !d.bdRead { - d.readNextBd() - } - major := d.bd >> 5 - if major == cborMajorUint { - } else if major == cborMajorNegInt { - neg = true - } else { - d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) - return - } - return -} - -func (d 
*cborDecDriver) DecodeInt(bitsize uint8) (i int64) { - neg := d.decCheckInteger() - ui := d.decUint() - // check if this number can be converted to an int without overflow - var overflow bool - if neg { - if i, overflow = chkOvf.SignedInt(ui + 1); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) - return - } - i = -i - } else { - if i, overflow = chkOvf.SignedInt(ui); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui) - return - } - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("cbor: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if d.decCheckInteger() { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - ui = d.decUint() - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("cbor: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdFloat16 { - f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) - } else if bd == cborBdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if bd == cborBdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else if bd >= cborBaseUint && bd < cborBaseBytes { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) - return - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("cbor: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *cborDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdTrue { - b = true - } else if bd == cborBdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) ReadMapStart() (length int) { - d.bdRead = false - if d.bd == cborBdIndefiniteMap { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) ReadArrayStart() (length int) { - d.bdRead = false - if d.bd == cborBdIndefiniteArray { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) decLen() int { - return int(d.decUint()) -} - -func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { - d.bdRead = false - for { - if d.CheckBreak() { - break - } - if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { - d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) - return nil - } - n := d.decLen() - oldLen := len(bs) - newLen := oldLen + n - if newLen > cap(bs) { - bs2 := make([]byte, newLen, 2*cap(bs)+n) - copy(bs2, bs) - bs = bs2 - } else { - bs = bs[:newLen] - } - d.r.readb(bs[oldLen:newLen]) - // bs = append(bs, d.r.readn()...) 
- d.bdRead = false - } - d.bdRead = false - return bs -} - -func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return nil - } - if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { - if bs == nil { - return d.decAppendIndefiniteBytes(nil) - } - return d.decAppendIndefiniteBytes(bs[:0]) - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *cborDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if !d.bdRead { - d.readNextBd() - } - u := d.decUint() - d.bdRead = false - realxtag = u - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - d.d.decode(&re.Value) - } else if xtag != realxtag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) - return - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.bd { - case cborBdNil: - n.v = valueTypeNil - case cborBdFalse: - n.v = valueTypeBool - n.b = false - case cborBdTrue: - n.v = valueTypeBool - n.b = true - case cborBdFloat16, cborBdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case cborBdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case cborBdIndefiniteBytes: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case cborBdIndefiniteString: - n.v = valueTypeString - n.s = d.DecodeString() - case cborBdIndefiniteArray: - n.v = valueTypeArray - decodeFurther = true - case cborBdIndefiniteMap: - n.v = valueTypeMap - decodeFurther = true - default: - switch { - case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case d.bd >= cborBaseBytes && d.bd < cborBaseString: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case d.bd >= cborBaseString && d.bd < cborBaseArray: - n.v = valueTypeString - n.s = d.DecodeString() - case d.bd >= cborBaseArray && d.bd < cborBaseMap: - n.v = valueTypeArray - decodeFurther = true - case d.bd >= cborBaseMap && d.bd < cborBaseTag: - n.v = valueTypeMap - decodeFurther = true - case d.bd >= cborBaseTag && d.bd < cborBaseSimple: - n.v = valueTypeExt - n.u = d.decUint() - n.l = nil - // d.bdRead = false - // d.d.decode(&re.Value) // handled by decode itself. - // decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - return - } - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -// ------------------------- - -// CborHandle is a Handle for the CBOR encoding format, -// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . -// -// CBOR is comprehensively supported, including support for: -// - indefinite-length arrays/maps/bytes/strings -// - (extension) tags in range 0..0xffff (0 .. 
65535) -// - half, single and double-precision floats -// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) -// - nil, true, false, ... -// - arrays and maps, bytes and text strings -// -// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. -// Users can implement them as needed (using SetExt), including spec-documented ones: -// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. -// -// To encode with indefinite lengths (streaming), users will use -// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. -// -// For example, to encode "one-byte" as an indefinite length string: -// var buf bytes.Buffer -// e := NewEncoder(&buf, new(CborHandle)) -// buf.WriteByte(CborStreamString) -// e.MustEncode("one-") -// e.MustEncode("byte") -// buf.WriteByte(CborStreamBreak) -// encodedBytes := buf.Bytes() -// var vv interface{} -// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) -// // Now, vv contains the same string "one-byte" -// -type CborHandle struct { - binaryEncodingType - BasicHandle -} - -func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *CborHandle) newEncDriver(e *Encoder) encDriver { - return &cborEncDriver{e: e, w: e.w, h: h} -} - -func (h *CborHandle) newDecDriver(d *Decoder) decDriver { - return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *cborEncDriver) reset() { - e.w = e.e.w -} - -func (d *cborDecDriver) reset() { - d.r = d.d.r - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*cborDecDriver)(nil) -var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go deleted file mode 100644 index 52c1dfe83..000000000 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "errors" - "fmt" - "io" - "reflect" - "time" -) - -// Some tagging information for error messages. -const ( - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -var ( - onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") - cannotDecodeIntoNilErr = errors.New("cannot decode into nil") -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - unreadn1() - - // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR - // just return a view of the []byte being decoded from. - // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. - readx(n int) []byte - readb([]byte) - readn1() uint8 - readn1eof() (v uint8, eof bool) - numread() int // number of bytes read - track() - stopTrack() []byte -} - -type decReaderByteScanner interface { - io.Reader - io.ByteScanner -} - -type decDriver interface { - // this will check if the next token is a break. - CheckBreak() bool - TryDecodeAsNil() bool - // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. 
- ContainerType() (vt valueType) - IsBuiltinType(rt uintptr) bool - DecodeBuiltin(rt uintptr, v interface{}) - - // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. - // For maps and arrays, it will not do the decoding in-band, but will signal - // the decoder, so that is done later, by setting the decNaked.valueType field. - // - // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - // for extensions, DecodeNaked must read the tag and the []byte if it exists. - // if the []byte is not read, then kInterfaceNaked will treat it as a Handle - // that stores the subsequent value in-band, and complete reading the RawExt. - // - // extensions should also use readx to decode them, for efficiency. - // kInterface will extract the detached byte slice if it has to pass it outside its realm. - DecodeNaked() - DecodeInt(bitsize uint8) (i int64) - DecodeUint(bitsize uint8) (ui uint64) - DecodeFloat(chkOverflow32 bool) (f float64) - DecodeBool() (b bool) - // DecodeString can also decode symbols. - // It looks redundant as DecodeBytes is available. - // However, some codecs (e.g. binc) support symbols and can - // return a pre-stored string value, meaning that it can bypass - // the cost of []byte->string conversion. - DecodeString() (s string) - - // DecodeBytes may be called directly, without going through reflection. - // Consequently, it must be designed to handle possible nil. - DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) - - // decodeExt will decode into a *RawExt or into an extension. - DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) - // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - ReadMapStart() int - ReadArrayStart() int - - reset() - uncacheRead() -} - -type decNoSeparator struct { -} - -func (_ decNoSeparator) ReadEnd() {} - -// func (_ decNoSeparator) uncacheRead() {} - -type DecodeOptions struct { - // MapType specifies type to use during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} - MapType reflect.Type - - // SliceType specifies type to use during schema-less decoding of an array in the stream. - // If nil, we use []interface{} - SliceType reflect.Type - - // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with. - // If 0 or negative, we default to a sensible value based on the size of an element in the collection. - // - // For example, when decoding, a stream may say that it has MAX_UINT elements. - // We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash. - // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. - MaxInitLen int - - // If ErrorIfNoField, return an error when decoding a map - // from a codec stream into a struct, and no matching struct field is found. - ErrorIfNoField bool - - // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. - // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, - // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). - ErrorIfNoArrayExpand bool - - // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). - SignedInteger bool - - // MapValueReset controls how we decode into a map value. - // - // By default, we MAY retrieve the mapping for a key, and then decode into that. 
- // However, especially with big maps, that retrieval may be expensive and unnecessary - // if the stream already contains all that is necessary to recreate the value. - // - // If true, we will never retrieve the previous mapping, - // but rather decode into a new value and set that in the map. - // - // If false, we will retrieve the previous mapping if necessary e.g. - // the previous mapping is a pointer, or is a struct or array with pre-set state, - // or is an interface. - MapValueReset bool - - // InterfaceReset controls how we decode into an interface. - // - // By default, when we see a field that is an interface{...}, - // or a map with interface{...} value, we will attempt decoding into the - // "contained" value. - // - // However, this prevents us from reading a string into an interface{} - // that formerly contained a number. - // - // If true, we will decode into a new "blank" value, and set that in the interface. - // If false, we will decode into whatever is contained in the interface. - InterfaceReset bool - - // InternString controls interning of strings during decoding. - // - // Some handles, e.g. json, typically will read map keys as strings. - // If the set of keys are finite, it may help reduce allocation to - // look them up from a map (than to allocate them afresh). - // - // Note: Handles will be smart when using the intern functionality. - // So everything will not be interned. - InternString bool - - // PreferArrayOverSlice controls whether to decode to an array or a slice. - // - // This only impacts decoding into a nil interface{}. - // Consequently, it has no effect on codecgen. - // - // *Note*: This only applies if using go1.5 and above, - // as it requires reflect.ArrayOf support which was absent before go1.5. - PreferArrayOverSlice bool -} - -// ------------------------------------ - -// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods -// of io.Reader, io.ByteScanner. -type ioDecByteScanner struct { - r io.Reader - l byte // last byte - ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread - b [1]byte // tiny buffer for reading single bytes -} - -func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { - var firstByte bool - if z.ls == 1 { - z.ls = 2 - p[0] = z.l - if len(p) == 1 { - n = 1 - return - } - firstByte = true - p = p[1:] - } - n, err = z.r.Read(p) - if n > 0 { - if err == io.EOF && n == len(p) { - err = nil // read was successful, so postpone EOF (till next time) - } - z.l = p[n-1] - z.ls = 2 - } - if firstByte { - n++ - } - return -} - -func (z *ioDecByteScanner) ReadByte() (c byte, err error) { - n, err := z.Read(z.b[:]) - if n == 1 { - c = z.b[0] - if err == io.EOF { - err = nil // read was successful, so postpone EOF (till next time) - } - } - return -} - -func (z *ioDecByteScanner) UnreadByte() (err error) { - x := z.ls - if x == 0 { - err = errors.New("cannot unread - nothing has been read") - } else if x == 1 { - err = errors.New("cannot unread - last byte has not been read") - } else if x == 2 { - z.ls = 1 - } - return -} - -// ioDecReader is a decReader that reads off an io.Reader -type ioDecReader struct { - br decReaderByteScanner - // temp byte array re-used internally for efficiency during read. - // shares buffer with Decoder, so we keep size of struct within 8 words. 
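The DecodeOptions fields documented above are embedded, via BasicHandle, into the concrete handles such as CborHandle and BincHandle, which is why the decoder reads them as d.h.MapType, d.h.SignedInteger and so on. A minimal configuration sketch, assuming the vendored import path github.com/ugorji/go/codec and only the public API visible in these deleted files (CborHandle, NewEncoder, NewDecoderBytes, Decode); the sample values are illustrative:

package main

import (
	"bytes"
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle

	// Schema-less maps decode as map[string]interface{} rather than the
	// default map[interface{}]interface{} (DecodeOptions.MapType).
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	// Report unsigned stream values as int64 when decoding into interface{}.
	h.SignedInteger = true
	// Cap the initial allocation for collections whose declared length
	// comes from the stream (see decInferLen later in this hunk).
	h.MaxInitLen = 1024

	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, &h).Encode(map[string]int{"answer": 42}); err != nil {
		panic(err)
	}

	var v interface{}
	if err := codec.NewDecoderBytes(buf.Bytes(), &h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // a map[string]interface{} holding an int64 value
}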
- x *[scratchByteArrayLen]byte - bs ioDecByteScanner - n int // num read - tr []byte // tracking bytes read - trb bool -} - -func (z *ioDecReader) numread() int { - return z.n -} - -func (z *ioDecReader) readx(n int) (bs []byte) { - if n <= 0 { - return - } - if n < len(z.x) { - bs = z.x[:n] - } else { - bs = make([]byte, n) - } - if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { - panic(err) - } - z.n += len(bs) - if z.trb { - z.tr = append(z.tr, bs...) - } - return -} - -func (z *ioDecReader) readb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := io.ReadAtLeast(z.br, bs, len(bs)) - z.n += n - if err != nil { - panic(err) - } - if z.trb { - z.tr = append(z.tr, bs...) - } -} - -func (z *ioDecReader) readn1() (b uint8) { - b, err := z.br.ReadByte() - if err != nil { - panic(err) - } - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - return b -} - -func (z *ioDecReader) readn1eof() (b uint8, eof bool) { - b, err := z.br.ReadByte() - if err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - } else if err == io.EOF { - eof = true - } else { - panic(err) - } - return -} - -func (z *ioDecReader) unreadn1() { - err := z.br.UnreadByte() - if err != nil { - panic(err) - } - z.n-- - if z.trb { - if l := len(z.tr) - 1; l >= 0 { - z.tr = z.tr[:l] - } - } -} - -func (z *ioDecReader) track() { - if z.tr != nil { - z.tr = z.tr[:0] - } - z.trb = true -} - -func (z *ioDecReader) stopTrack() (bs []byte) { - z.trb = false - return z.tr -} - -// ------------------------------------ - -var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available - t int // track start -} - -func (z *bytesDecReader) reset(in []byte) { - z.b = in - z.a = len(in) - z.c = 0 - z.t = 0 -} - -func (z *bytesDecReader) numread() int { - return z.c -} - -func (z *bytesDecReader) unreadn1() { - if z.c == 0 || len(z.b) == 0 { - panic(bytesDecReaderCannotUnreadErr) - } - z.c-- - z.a++ - return -} - -func (z *bytesDecReader) readx(n int) (bs []byte) { - // slicing from a non-constant start position is more expensive, - // as more computation is required to decipher the pointer start position. - // However, we do it only once, and it's better than reslicing both z.b and return value. 
- - if n <= 0 { - } else if z.a == 0 { - panic(io.EOF) - } else if n > z.a { - panic(io.ErrUnexpectedEOF) - } else { - c0 := z.c - z.c = c0 + n - z.a = z.a - n - bs = z.b[c0:z.c] - } - return -} - -func (z *bytesDecReader) readn1() (v uint8) { - if z.a == 0 { - panic(io.EOF) - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { - if z.a == 0 { - eof = true - return - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readx(len(bs))) -} - -func (z *bytesDecReader) track() { - z.t = z.c -} - -func (z *bytesDecReader) stopTrack() (bs []byte) { - return z.b[z.t:z.c] -} - -// ------------------------------------ - -type decFnInfo struct { - d *Decoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - -// ---------------------------------------- - -type decFn struct { - i decFnInfo - f func(*decFnInfo, reflect.Value) -} - -func (f *decFnInfo) builtin(rv reflect.Value) { - f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) -} - -func (f *decFnInfo) rawExt(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) -} - -func (f *decFnInfo) raw(rv reflect.Value) { - rv.SetBytes(f.d.raw()) -} - -func (f *decFnInfo) ext(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) -} - -func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { - if indir == -1 { - v = rv.Addr().Interface() - } else if indir == 0 { - v = rv.Interface() - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - v = rv.Interface() - } - return -} - -func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { - f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) -} - -func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) { - bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) - xbs := f.d.d.DecodeBytes(nil, false, true) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) textUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) - // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
- fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) kErr(rv reflect.Value) { - f.d.errorf("no decoding function defined for kind %v", rv.Kind()) -} - -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.d.d.DecodeString()) -} - -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.d.d.DecodeBool()) -} - -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(intBitsize)) -} - -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(64)) -} - -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(32)) -} - -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(8)) -} - -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(16)) -} - -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.d.d.DecodeFloat(true)) -} - -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.d.d.DecodeFloat(false)) -} - -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(8)) -} - -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(64)) -} - -func (f *decFnInfo) kUint(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUintptr(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(32)) -} - -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(16)) -} - -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) -// } - -// var kIntfCtr uint64 - -func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { - // nil interface: - // use some hieristics to decode it appropriately - // based on the detected next value in the stream. - d := f.d - d.d.DecodeNaked() - n := &d.n - if n.v == valueTypeNil { - return - } - // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
- // if num := f.ti.rt.NumMethod(); num > 0 { - if f.ti.numMeth > 0 { - d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) - return - } - // var useRvn bool - switch n.v { - case valueTypeMap: - // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { - // } else if d.h.MapType == mapStrIntfTyp { // for json performance - // } - if d.mtid == 0 || d.mtid == mapIntfIntfTypId { - l := len(n.ms) - n.ms = append(n.ms, nil) - var v2 interface{} = &n.ms[l] - d.decode(v2) - rvn = reflect.ValueOf(v2).Elem() - n.ms = n.ms[:l] - } else if d.mtid == mapStrIntfTypId { // for json performance - l := len(n.ns) - n.ns = append(n.ns, nil) - var v2 interface{} = &n.ns[l] - d.decode(v2) - rvn = reflect.ValueOf(v2).Elem() - n.ns = n.ns[:l] - } else { - rvn = reflect.New(d.h.MapType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeArray: - // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { - if d.stid == 0 || d.stid == intfSliceTypId { - l := len(n.ss) - n.ss = append(n.ss, nil) - var v2 interface{} = &n.ss[l] - d.decode(v2) - n.ss = n.ss[:l] - rvn = reflect.ValueOf(v2).Elem() - if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { - rvn = reflectArrayOf(rvn) - } - } else { - rvn = reflect.New(d.h.SliceType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeExt: - var v interface{} - tag, bytes := n.u, n.l // calling decode below might taint the values - if bytes == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - d.decode(v2) - v = *v2 - n.is = n.is[:l] - } - bfn := d.h.getExtForTag(tag) - if bfn == nil { - var re RawExt - re.Tag = tag - re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) - rvn = reflect.ValueOf(re) - } else { - rvnA := reflect.New(bfn.rt) - rvn = rvnA.Elem() - if bytes != nil { - bfn.ext.ReadExt(rvnA.Interface(), bytes) - } else { - bfn.ext.UpdateExt(rvnA.Interface(), v) - } - } - case valueTypeNil: - // no-op - case valueTypeInt: - rvn = reflect.ValueOf(&n.i).Elem() - case valueTypeUint: - rvn = reflect.ValueOf(&n.u).Elem() - case valueTypeFloat: - rvn = reflect.ValueOf(&n.f).Elem() - case valueTypeBool: - rvn = reflect.ValueOf(&n.b).Elem() - case valueTypeString, valueTypeSymbol: - rvn = reflect.ValueOf(&n.s).Elem() - case valueTypeBytes: - rvn = reflect.ValueOf(&n.l).Elem() - case valueTypeTimestamp: - rvn = reflect.ValueOf(&n.t).Elem() - default: - panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) - } - return -} - -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - - // Note: - // A consequence of how kInterface works, is that - // if an interface already contains something, we try - // to decode into what was there before. - // We do not replace with a generic value (as got from decodeNaked). - - var rvn reflect.Value - if rv.IsNil() { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { - rv.Set(rvn) - } - } else if f.d.h.InterfaceReset { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { - rv.Set(rvn) - } else { - // reset to zero value based on current type in there. - rv.Set(reflect.Zero(rv.Elem().Type())) - } - } else { - rvn = rv.Elem() - // Note: interface{} is settable, but underlying type may not be. - // Consequently, we have to set the reflect.Value directly. - // if underlying type is settable (e.g. ptr or interface), - // we just decode into it. - // Else we create a settable value, decode into it, and set on the interface. 
- if rvn.CanSet() { - f.d.decodeValue(rvn, nil) - } else { - rvn2 := reflect.New(rvn.Type()).Elem() - rvn2.Set(rvn) - f.d.decodeValue(rvn2, nil) - rv.Set(rvn2) - } - } -} - -func (f *decFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - d := f.d - dd := d.d - cr := d.cr - ctyp := dd.ContainerType() - if ctyp == valueTypeMap { - containerLen := dd.ReadMapStart() - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - tisfi := fti.sfi - hasLen := containerLen >= 0 - if hasLen { - for j := 0; j < containerLen; j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - } - } else { - for j := 0; !dd.CheckBreak(); j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - } else if ctyp == valueTypeArray { - containerLen := dd.ReadArrayStart() - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - return - } - // Not much gain from doing it two ways for array. - // Arrays are not used as much for structs. - hasLen := containerLen >= 0 - for j, si := range fti.sfip { - if hasLen { - if j == containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - d.structFieldNotFound(j, "") - } - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - } else { - f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) - return - } -} - -func (f *decFnInfo) kSlice(rv reflect.Value) { - // A slice can be set from a map or array in stream. - // This way, the order can be kept (as order is lost with map). 
- ti := f.ti - d := f.d - dd := d.d - rtelem0 := ti.rt.Elem() - ctyp := dd.ContainerType() - if ctyp == valueTypeBytes || ctyp == valueTypeString { - // you can only decode bytes or string in the stream into a slice or array of bytes - if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { - f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) - } - if f.seq == seqTypeChan { - bs2 := dd.DecodeBytes(nil, false, true) - ch := rv.Interface().(chan<- byte) - for _, b := range bs2 { - ch <- b - } - } else { - rvbs := rv.Bytes() - bs2 := dd.DecodeBytes(rvbs, false, false) - if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { - if rv.CanSet() { - rv.SetBytes(bs2) - } else { - copy(rvbs, bs2) - } - } - } - return - } - - // array := f.seq == seqTypeChan - - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - - // // an array can never return a nil slice. so no need to check f.array here. - if containerLenS == 0 { - if f.seq == seqTypeSlice { - if rv.IsNil() { - rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) - } else { - rv.SetLen(0) - } - } else if f.seq == seqTypeChan { - if rv.IsNil() { - rv.Set(reflect.MakeChan(ti.rt, 0)) - } - } - slh.End() - return - } - - rtelem := rtelem0 - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - fn := d.getDecFn(rtelem, true, true) - - var rv0, rv9 reflect.Value - rv0 = rv - rvChanged := false - - // for j := 0; j < containerLenS; j++ { - var rvlen int - if containerLenS > 0 { // hasLen - if f.seq == seqTypeChan { - if rv.IsNil() { - rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - rv.Set(reflect.MakeChan(ti.rt, rvlen)) - } - // handle chan specially: - for j := 0; j < containerLenS; j++ { - rv9 = reflect.New(rtelem0).Elem() - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - rv.Send(rv9) - } - } else { // slice or array - var truncated bool // says len of sequence is not same as expected number of elements - numToRead := containerLenS // if truncated, reset numToRead - - rvcap := rv.Cap() - rvlen = rv.Len() - if containerLenS > rvcap { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, containerLenS) - } else { - oldRvlenGtZero := rvlen > 0 - rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - if truncated { - if rvlen <= rvcap { - rv.SetLen(rvlen) - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { - reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) - } - rvcap = rvlen - } - numToRead = rvlen - } else if containerLenS != rvlen { - if f.seq == seqTypeSlice { - rv.SetLen(containerLenS) - rvlen = containerLenS - } - } - j := 0 - // we read up to the numToRead - for ; j < numToRead; j++ { - slh.ElemContainerState(j) - d.decodeValue(rv.Index(j), fn) - } - - // if slice, expand and read up to containerLenS (or EOF) iff truncated - // if array, swallow all the rest. 
- - if f.seq == seqTypeArray { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } else if truncated { // slice was truncated, as chan NOT in this block - for ; j < containerLenS; j++ { - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - } - } - } - } else { - rvlen = rv.Len() - j := 0 - for ; !dd.CheckBreak(); j++ { - if f.seq == seqTypeChan { - slh.ElemContainerState(j) - rv9 = reflect.New(rtelem0).Elem() - d.decodeValue(rv9, fn) - rv.Send(rv9) - } else { - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= rvlen { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, j+1) - decodeIntoBlank = true - } else { // if f.seq == seqTypeSlice - // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - rvlen++ - rvChanged = true - } - } else { // slice or array - rv9 = rv.Index(j) - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { // seqTypeSlice - d.decodeValue(rv9, fn) - } - } - } - if f.seq == seqTypeSlice { - if j < rvlen { - rv.SetLen(j) - } else if j == 0 && rv.IsNil() { - rv = reflect.MakeSlice(ti.rt, 0, 0) - rvChanged = true - } - } - } - slh.End() - - if rvChanged { - rv0.Set(rv) - } -} - -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} - -func (f *decFnInfo) kMap(rv reflect.Value) { - d := f.d - dd := d.d - containerLen := dd.ReadMapStart() - cr := d.cr - ti := f.ti - if rv.IsNil() { - rv.Set(reflect.MakeMap(ti.rt)) - } - - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - - ktype, vtype := ti.rt.Key(), ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - vtypeKind := vtype.Kind() - var keyFn, valFn *decFn - var xtyp reflect.Type - for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { - } - keyFn = d.getDecFn(xtyp, true, true) - for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { - } - valFn = d.getDecFn(xtyp, true, true) - var mapGet, mapSet bool - if !f.d.h.MapValueReset { - // if pointer, mapGet = true - // if interface, mapGet = true if !DecodeNakedAlways (else false) - // if builtin, mapGet = false - // else mapGet = true - if vtypeKind == reflect.Ptr { - mapGet = true - } else if vtypeKind == reflect.Interface { - if !f.d.h.InterfaceReset { - mapGet = true - } - } else if !isImmutableKind(vtypeKind) { - mapGet = true - } - } - - var rvk, rvv, rvz reflect.Value - - // for j := 0; j < containerLen; j++ { - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.decodeValue(rvk, keyFn) - - // special case if a byte array. 
- if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk.Bytes())) - } - } - mapSet = true // set to false if u do a get, and its a pointer, and exists - if mapGet { - rvv = rv.MapIndex(rvk) - if rvv.IsValid() { - if vtypeKind == reflect.Ptr { - mapSet = false - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.decodeValue(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) - } - } - } else { - for j := 0; !dd.CheckBreak(); j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.decodeValue(rvk, keyFn) - - // special case if a byte array. - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk.Bytes())) - } - } - mapSet = true // set to false if u do a get, and its a pointer, and exists - if mapGet { - rvv = rv.MapIndex(rvk) - if rvv.IsValid() { - if vtypeKind == reflect.Ptr { - mapSet = false - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.decodeValue(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -type decRtidFn struct { - rtid uintptr - fn decFn -} - -// decNaked is used to keep track of the primitives decoded. -// Without it, we would have to decode each primitive and wrap it -// in an interface{}, causing an allocation. -// In this model, the primitives are decoded in a "pseudo-atomic" fashion, -// so we can rest assured that no other decoding happens while these -// primitives are being decoded. -// -// maps and arrays are not handled by this mechanism. -// However, RawExt is, and we accommodate for extensions that decode -// RawExt from DecodeNaked, but need to decode the value subsequently. -// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. -// -// However, decNaked also keeps some arrays of default maps and slices -// used in DecodeNaked. This way, we can get a pointer to it -// without causing a new heap allocation. -// -// kInterfaceNaked will ensure that there is no allocation for the common -// uses. -type decNaked struct { - // r RawExt // used for RawExt, uint, []byte. - u uint64 - i int64 - f float64 - l []byte - s string - t time.Time - b bool - v valueType - - // stacks for reducing allocation - is []interface{} - ms []map[interface{}]interface{} - ns []map[string]interface{} - ss [][]interface{} - // rs []RawExt - - // keep arrays at the bottom? Chance is that they are not used much. - ia [4]interface{} - ma [4]map[interface{}]interface{} - na [4]map[string]interface{} - sa [4][]interface{} - // ra [2]RawExt -} - -func (n *decNaked) reset() { - if n.ss != nil { - n.ss = n.ss[:0] - } - if n.is != nil { - n.is = n.is[:0] - } - if n.ms != nil { - n.ms = n.ms[:0] - } - if n.ns != nil { - n.ns = n.ns[:0] - } -} - -// A Decoder reads and decodes an object from an input stream in the codec format. 
-type Decoder struct { - // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. - // Try to put things that go together to fit within a cache line (8 words). - - d decDriver - // NOTE: Decoder shouldn't call it's read methods, - // as the handler MAY need to do some coordination. - r decReader - // sa [initCollectionCap]decRtidFn - h *BasicHandle - hh Handle - - be bool // is binary encoding - bytes bool // is bytes reader - js bool // is json handle - - rb bytesDecReader - ri ioDecReader - cr containerStateRecv - - s []decRtidFn - f map[uintptr]*decFn - - // _ uintptr // for alignment purposes, so next one starts from a cache line - - // cache the mapTypeId and sliceTypeId for faster comparisons - mtid uintptr - stid uintptr - - n decNaked - b [scratchByteArrayLen]byte - is map[string]string // used for interning strings -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, Users are encouraged to pass in a memory buffered reader -// (eg bufio.Reader, bytes.Buffer). -func NewDecoder(r io.Reader, h Handle) *Decoder { - d := newDecoder(h) - d.Reset(r) - return d -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - d := newDecoder(h) - d.ResetBytes(in) - return d -} - -func newDecoder(h Handle) *Decoder { - d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - n := &d.n - // n.rs = n.ra[:0] - n.ms = n.ma[:0] - n.is = n.ia[:0] - n.ns = n.na[:0] - n.ss = n.sa[:0] - _, d.js = h.(*JsonHandle) - if d.h.InternString { - d.is = make(map[string]string, 32) - } - d.d = h.newDecDriver(d) - d.cr, _ = d.d.(containerStateRecv) - // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri}) - return d -} - -func (d *Decoder) resetCommon() { - d.n.reset() - d.d.reset() - // reset all things which were cached from the Handle, - // but could be changed. - d.mtid, d.stid = 0, 0 - if d.h.MapType != nil { - d.mtid = reflect.ValueOf(d.h.MapType).Pointer() - } - if d.h.SliceType != nil { - d.stid = reflect.ValueOf(d.h.SliceType).Pointer() - } -} - -func (d *Decoder) Reset(r io.Reader) { - d.ri.x = &d.b - // d.s = d.sa[:0] - d.ri.bs.r = r - var ok bool - d.ri.br, ok = r.(decReaderByteScanner) - if !ok { - d.ri.br = &d.ri.bs - } - d.r = &d.ri - d.resetCommon() -} - -func (d *Decoder) ResetBytes(in []byte) { - // d.s = d.sa[:0] - d.rb.reset(in) - d.r = &d.rb - d.resetCommon() -} - -// func (d *Decoder) sendContainerState(c containerState) { -// if d.cr != nil { -// d.cr.sendContainerState(c) -// } -// } - -// Decode decodes the stream from reader and stores the result in the -// value pointed to by v. v cannot be a nil pointer. v can also be -// a reflect.Value of a pointer. -// -// Note that a pointer to a nil interface is not a nil pointer. -// If you do not know what type of stream it is, pass in a pointer to a nil interface. -// We will decode and store a value in that nil interface. -// -// Sample usages: -// // Decoding into a non-nil typed value -// var f float32 -// err = codec.NewDecoder(r, handle).Decode(&f) -// -// // Decoding into nil interface -// var v interface{} -// dec := codec.NewDecoder(r, handle) -// err = dec.Decode(&v) -// -// When decoding into a nil interface{}, we will decode into an appropriate value based -// on the contents of the stream: -// - Numbers are decoded as float64, int64 or uint64. 
-// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. -// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). -// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) - return -} - -// this is not a smart swallow, as it allocates objects and does unnecessary work. -func (d *Decoder) swallowViaHammer() { - var blank interface{} - d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) -} - -func (d *Decoder) swallow() { - // smarter decode that just swallows the content - dd := d.d - if dd.TryDecodeAsNil() { - return - } - cr := d.cr - switch dd.ContainerType() { - case valueTypeMap: - containerLen := dd.ReadMapStart() - clenGtEqualZero := containerLen >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.swallow() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.swallow() - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - case valueTypeArray: - containerLenS := dd.ReadArrayStart() - clenGtEqualZero := containerLenS >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLenS { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - d.swallow() - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - case valueTypeBytes: - dd.DecodeBytes(d.b[:], false, true) - case valueTypeString: - dd.DecodeBytes(d.b[:], true, true) - // dd.DecodeStringAsBytes(d.b[:]) - default: - // these are all primitives, which we can get from decodeNaked - // if RawExt using Value, complete the processing. - dd.DecodeNaked() - if n := &d.n; n.v == valueTypeExt && n.l == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - d.decode(v2) - n.is = n.is[:l] - } - } -} - -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. 
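The Decode documentation above spells out the update semantics for containers: a stream map decoded into a struct updates the matching fields and leaves the others alone. A short sketch of that behaviour, again using only the public API visible in these deleted files; the point type and its values are illustrative:

package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

// point is an illustrative type. With no struct tags, the encoded field
// names default to the Go field names, so a stream key "X" matches X.
type point struct {
	X int
	Y int
}

func main() {
	h := new(codec.CborHandle)

	// Encode a map that mentions only X.
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, h).Encode(map[string]int{"X": 3}); err != nil {
		panic(err)
	}

	// Decoding into an existing struct updates the matching field (X)
	// and keeps the pre-set value of Y.
	p := point{X: 1, Y: 2}
	if err := codec.NewDecoderBytes(buf.Bytes(), h).Decode(&p); err != nil {
		panic(err)
	}
	fmt.Println(p) // {3 2}
}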
-func (d *Decoder) MustDecode(v interface{}) { - d.decode(v) -} - -func (d *Decoder) decode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecDecodeSelf(d) - // return - // } - - if d.d.TryDecodeAsNil() { - switch v := iv.(type) { - case nil: - case *string: - *v = "" - case *bool: - *v = false - case *int: - *v = 0 - case *int8: - *v = 0 - case *int16: - *v = 0 - case *int32: - *v = 0 - case *int64: - *v = 0 - case *uint: - *v = 0 - case *uint8: - *v = 0 - case *uint16: - *v = 0 - case *uint32: - *v = 0 - case *uint64: - *v = 0 - case *float32: - *v = 0 - case *float64: - *v = 0 - case *[]uint8: - *v = nil - case *Raw: - *v = nil - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - v = v.Elem() - if v.IsValid() { - v.Set(reflect.Zero(v.Type())) - } - default: - rv := reflect.ValueOf(iv) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - rv = rv.Elem() - if rv.IsValid() { - rv.Set(reflect.Zero(rv.Type())) - } - } - return - } - - switch v := iv.(type) { - case nil: - d.error(cannotDecodeIntoNilErr) - return - - case Selfer: - v.CodecDecodeSelf(d) - - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - d.decodeValueNotNil(v.Elem(), nil) - - case *string: - *v = d.d.DecodeString() - case *bool: - *v = d.d.DecodeBool() - case *int: - *v = int(d.d.DecodeInt(intBitsize)) - case *int8: - *v = int8(d.d.DecodeInt(8)) - case *int16: - *v = int16(d.d.DecodeInt(16)) - case *int32: - *v = int32(d.d.DecodeInt(32)) - case *int64: - *v = d.d.DecodeInt(64) - case *uint: - *v = uint(d.d.DecodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.DecodeUint(8)) - case *uint16: - *v = uint16(d.d.DecodeUint(16)) - case *uint32: - *v = uint32(d.d.DecodeUint(32)) - case *uint64: - *v = d.d.DecodeUint(64) - case *float32: - *v = float32(d.d.DecodeFloat(true)) - case *float64: - *v = d.d.DecodeFloat(false) - case *[]uint8: - *v = d.d.DecodeBytes(*v, false, false) - - case *Raw: - *v = d.raw() - - case *interface{}: - d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) - - default: - if !fastpathDecodeTypeSwitch(iv, d) { - d.decodeI(iv, true, false, false, false) - } - } -} - -func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { - if tryNil && d.d.TryDecodeAsNil() { - // No need to check if a ptr, recursively, to determine - // whether to set value to nil. - // Just always set value to its zero type. - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. 
- for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - return rv, true -} - -func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) { - rv := reflect.ValueOf(iv) - if checkPtr { - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - } - rv, proceed := d.preDecodeValue(rv, tryNil) - if proceed { - fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer) - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) { - if rv, proceed := d.preDecodeValue(rv, true); proceed { - if fn == nil { - fn = d.getDecFn(rv.Type(), true, true) - } - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) { - if rv, proceed := d.preDecodeValue(rv, false); proceed { - if fn == nil { - fn = d.getDecFn(rv.Type(), true, true) - } - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) { - rtid := reflect.ValueOf(rt).Pointer() - - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times - - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i := range d.s { - v := &(d.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true - break - } - } - } - if ok { - return - } - - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]*decFn, initCollectionCap) - } - fn = new(decFn) - d.f[rtid] = fn - } else { - if d.s == nil { - d.s = make([]decRtidFn, 0, initCollectionCap) - } - d.s = append(d.s, decRtidFn{rtid: rtid}) - fn = &(d.s[len(d.s)-1]).fn - } - - // debugf("\tCreating new dec fn for type: %v\n", rt) - ti := d.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.d = d - fi.ti = ti - - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. 
- if checkCodecSelfer && ti.cs { - fn.f = (*decFnInfo).selferUnmarshal - } else if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if rtid == rawTypId { - fn.f = (*decFnInfo).raw - } else if d.d.IsBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfFn := d.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*decFnInfo).ext - } else if supportMarshalInterfaces && d.be && ti.bunm { - fn.f = (*decFnInfo).binaryUnmarshal - } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { - //If JSON, we should check JSONUnmarshal before textUnmarshal - fn.f = (*decFnInfo).jsonUnmarshal - } else if supportMarshalInterfaces && !d.be && ti.tunm { - fn.f = (*decFnInfo).textUnmarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].decfn - } - } else { - // use mapping for underlying type if there - ok = false - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].decfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *decFnInfo, xrv reflect.Value) { - // xfnf(xf, xrv.Convert(xrt)) - xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Uintptr: - fn.f = (*decFnInfo).kUintptr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*decFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - } - - return -} - -func (d *Decoder) structFieldNotFound(index int, rvkencname string) { - // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
- if d.h.ErrorIfNoField { - if index >= 0 { - d.errorf("no matching struct field found when decoding stream array at index %v", index) - return - } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) - return - } - } - d.swallow() -} - -func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { - if d.h.ErrorIfNoArrayExpand { - d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) - } -} - -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return - } - d.errNotValidPtrValue(rv) -} - -func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { - if !rv.IsValid() { - d.error(cannotDecodeIntoNilErr) - return - } - if !rv.CanInterface() { - d.errorf("cannot decode into a value without an interface: %v", rv) - return - } - rvi := rv.Interface() - d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) -} - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) errorf(format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = d.r.numread() - copy(params2[1:], params) - err := fmt.Errorf("[pos %d]: "+format, params2...) - panic(err) -} - -func (d *Decoder) string(v []byte) (s string) { - if d.is != nil { - s, ok := d.is[string(v)] // no allocation here. - if !ok { - s = string(v) - d.is[s] = s - } - return s - } - return string(v) // don't return stringView, as we need a real string here. -} - -func (d *Decoder) intern(s string) { - if d.is != nil { - d.is[s] = s - } -} - -// nextValueBytes returns the next value in the stream as a set of bytes. -func (d *Decoder) nextValueBytes() []byte { - d.d.uncacheRead() - d.r.track() - d.swallow() - return d.r.stopTrack() -} - -func (d *Decoder) raw() []byte { - // ensure that this is not a view into the bytes - // i.e. make new copy always. - bs := d.nextValueBytes() - bs2 := make([]byte, len(bs)) - copy(bs2, bs) - return bs2 -} - -// -------------------------------------------------- - -// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. -// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
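MapBySlice, named in the comment above, is the package's exported marker interface for slice types whose elements are flat key/value pairs; decSliceHelper (defined next) is what allows such a slice to be filled from either a map or an array in the stream. A hedged encode-side sketch, where the pairs type and the JsonHandle are illustrative assumptions rather than anything in this patch:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// pairs is a hypothetical example type: a flat [k1, v1, k2, v2, ...] slice.
// Implementing codec.MapBySlice asks the encoder to emit it as a map,
// which also means its length must be even.
type pairs []interface{}

func (pairs) MapBySlice() {}

func main() {
	var out []byte
	h := new(codec.JsonHandle)
	if err := codec.NewEncoderBytes(&out, h).Encode(pairs{"a", 1, "b", 2}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected to print a JSON object, e.g. {"a":1,"b":2}
}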
-type decSliceHelper struct { - d *Decoder - // ct valueType - array bool -} - -func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { - dd := d.d - ctyp := dd.ContainerType() - if ctyp == valueTypeArray { - x.array = true - clen = dd.ReadArrayStart() - } else if ctyp == valueTypeMap { - clen = dd.ReadMapStart() * 2 - } else { - d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) - } - // x.ct = ctyp - x.d = d - return -} - -func (x decSliceHelper) End() { - cr := x.d.cr - if cr == nil { - return - } - if x.array { - cr.sendContainerState(containerArrayEnd) - } else { - cr.sendContainerState(containerMapEnd) - } -} - -func (x decSliceHelper) ElemContainerState(index int) { - cr := x.d.cr - if cr == nil { - return - } - if x.array { - cr.sendContainerState(containerArrayElem) - } else { - if index%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } -} - -func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) { - if clen == 0 { - return zeroByteSlice - } - if len(bs) == clen { - bsOut = bs - } else if cap(bs) >= clen { - bsOut = bs[:clen] - } else { - bsOut = make([]byte, clen) - } - r.readb(bsOut) - return -} - -func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { - if xlen := len(in); xlen > 0 { - if isBytesReader || xlen <= scratchByteArrayLen { - if cap(dest) >= xlen { - out = dest[:xlen] - } else { - out = make([]byte, xlen) - } - copy(out, in) - return - } - } - return in -} - -// decInferLen will infer a sensible length, given the following: -// - clen: length wanted. -// - maxlen: max length to be returned. -// if <= 0, it is unset, and we infer it based on the unit size -// - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - // handle when maxlen is not set i.e. <= 0 - if clen <= 0 { - return - } - if maxlen <= 0 { - // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. - // maxlen = 256 * 1024 / unit - // if maxlen < (4 * 1024) { - // maxlen = 4 * 1024 - // } - if unit < (256 / 4) { - maxlen = 256 * 1024 / unit - } else { - maxlen = 4 * 1024 - } - } - if clen > maxlen { - rvlen = maxlen - truncated = true - } else { - rvlen = clen - } - return - // if clen <= 0 { - // rvlen = 0 - // } else if maxlen > 0 && clen > maxlen { - // rvlen = maxlen - // truncated = true - // } else { - // rvlen = clen - // } - // return -} - -// // implement overall decReader wrapping both, for possible use inline: -// type decReaderT struct { -// bytes bool -// rb *bytesDecReader -// ri *ioDecReader -// } -// -// // implement *Decoder as a decReader. -// // Using decReaderT (defined just above) caused performance degradation -// // possibly because of constant copying the value, -// // and some value->interface conversion causing allocation. -// func (d *Decoder) unreadn1() { -// if d.bytes { -// d.rb.unreadn1() -// } else { -// d.ri.unreadn1() -// } -// } -// ... for other methods of decReader. -// Testing showed that performance improvement was negligible. diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/decode_go.go deleted file mode 100644 index ba289cef6..000000000 --- a/vendor/github.com/ugorji/go/codec/decode_go.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -// +build go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = true - -func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { - rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() - reflect.Copy(rvn2, rvn) - return -} diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/decode_go14.go deleted file mode 100644 index 50063bc8f..000000000 --- a/vendor/github.com/ugorji/go/codec/decode_go14.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = false - -func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { - panic("reflect.ArrayOf unsupported") -} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go deleted file mode 100644 index c2cef812e..000000000 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ /dev/null @@ -1,1461 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "fmt" - "io" - "reflect" - "sort" - "sync" -) - -const ( - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracts writing to a byte array or to an io.Writer. 
-type encWriter interface { - writeb([]byte) - writestr(string) - writen1(byte) - writen2(byte, byte) - atEndOfEncode() -} - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - IsBuiltinType(rt uintptr) bool - EncodeBuiltin(rt uintptr, v interface{}) - EncodeNil() - EncodeInt(i int64) - EncodeUint(i uint64) - EncodeBool(b bool) - EncodeFloat32(f float32) - EncodeFloat64(f float64) - // encodeExtPreamble(xtag byte, length int) - EncodeRawExt(re *RawExt, e *Encoder) - EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) - EncodeArrayStart(length int) - EncodeMapStart(length int) - EncodeString(c charEncoding, v string) - EncodeSymbol(v string) - EncodeStringBytes(c charEncoding, v []byte) - //TODO - //encBignum(f *big.Int) - //encStringRunes(c charEncoding, v []rune) - - reset() -} - -type encDriverAsis interface { - EncodeAsis(v []byte) -} - -type encNoSeparator struct{} - -func (_ encNoSeparator) EncodeEnd() {} - -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) -} - -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} - -type EncodeOptions struct { - // Encode a struct as an array, and not as a map - StructToArray bool - - // Canonical representation means that encoding a value will always result in the same - // sequence of bytes. - // - // This only affects maps, as the iteration order for maps is random. - // - // The implementation MAY use the natural sort order for the map keys if possible: - // - // - If there is a natural sort order (ie for number, bool, string or []byte keys), - // then the map keys are first sorted in natural order and then written - // with corresponding map values to the strema. - // - If there is no natural sort order, then the map keys will first be - // encoded into []byte, and then sorted, - // before writing the sorted keys and the corresponding map values to the stream. - // - Canonical bool - - // CheckCircularRef controls whether we check for circular references - // and error fast during an encode. - // - // If enabled, an error is received if a pointer to a struct - // references itself either directly or through one of its fields (iteratively). - // - // This is opt-in, as there may be a performance hit to checking circular references. - CheckCircularRef bool - - // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers - // when checking if a value is empty. - // - // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. - RecursiveEmptyCheck bool - - // Raw controls whether we encode Raw values. - // This is a "dangerous" option and must be explicitly set. - // If set, we blindly encode Raw values as-is, without checking - // if they are a correct representation of a value in that format. - // If unset, we error out. - Raw bool - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. 
- // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -} - -// --------------------------------------------- - -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter - bs [1]byte -} - -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - // _, err = o.w.Write([]byte{c}) - o.bs[0] = c - _, err = o.w.Write(o.bs[:]) - return -} - -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - // return o.w.Write([]byte(s)) - return o.w.Write(bytesView(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w ioEncWriterWriter - s simpleIoEncWriterWriter - // x [8]byte // temp byte array re-used internally for efficiency -} - -func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { - panic(err) - } - if n != len(bs) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) - } -} - -func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { - panic(err) - } - if n != len(s) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) -} - -func (z *ioEncWriter) atEndOfEncode() {} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return - } - oc, a := z.growNoAlloc(len(s)) - if a { - z.growAlloc(len(s), oc) - } - copy(z.b[oc:], s) -} - -func (z *bytesEncWriter) writestr(s string) { - if len(s) == 0 { - return - } - oc, a := z.growNoAlloc(len(s)) - if a { - z.growAlloc(len(s), oc) - } - copy(z.b[oc:], s) -} - -func (z *bytesEncWriter) writen1(b1 byte) { - oc, a := z.growNoAlloc(1) - if a { - z.growAlloc(1, oc) - } - z.b[oc] = b1 -} - -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - oc, a := z.growNoAlloc(2) - if a { - z.growAlloc(2, oc) - } - z.b[oc+1] = b2 - z.b[oc] = b1 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -// have a growNoalloc(n int), which can be inlined. -// if allocation is needed, then call growAlloc(n int) - -func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { - oldcursor = z.c - z.c = z.c + n - if z.c > len(z.b) { - if z.c > cap(z.b) { - allocNeeded = true - } else { - z.b = z.b[:cap(z.b)] - } - } - return -} - -func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { - // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
- // bytes.Buffer model (2*cap + n): much better - // bs := make([]byte, 2*cap(z.b)+n) - bs := make([]byte, growCap(cap(z.b), 1, n)) - copy(bs, z.b[:oldcursor]) - z.b = bs -} - -// --------------------------------------------- - -type encFnInfo struct { - e *Encoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - -func (f *encFnInfo) builtin(rv reflect.Value) { - f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) -} - -func (f *encFnInfo) raw(rv reflect.Value) { - f.e.raw(rv.Interface().(Raw)) -} - -func (f *encFnInfo) rawExt(rv reflect.Value) { - // rev := rv.Interface().(RawExt) - // f.e.e.EncodeRawExt(&rev, f.e) - var re *RawExt - if rv.CanAddr() { - re = rv.Addr().Interface().(*RawExt) - } else { - rev := rv.Interface().(RawExt) - re = &rev - } - f.e.e.EncodeRawExt(re, f.e) -} - -func (f *encFnInfo) ext(rv reflect.Value) { - // if this is a struct|array and it was addressable, then pass the address directly (not the value) - if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { - rv = rv.Addr() - } - f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) -} - -func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { - if indir == 0 { - v = rv.Interface() - } else if indir == -1 { - // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addressable, else copy value to an addressable value. - if rv.CanAddr() { - v = rv.Addr().Interface() - } else { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) - } - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - rv = rv.Elem() - } - v = rv.Interface() - } - return v, true -} - -func (f *encFnInfo) selferMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { - v.(Selfer).CodecEncodeSelf(f.e) - } -} - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { - bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) - } -} - -func (f *encFnInfo) textMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { - // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) - bs, fnerr := v.(encoding.TextMarshaler).MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) - } -} - -func (f *encFnInfo) jsonMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { - bs, fnerr := v.(jsonMarshaler).MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) - } -} - -func (f *encFnInfo) kBool(rv reflect.Value) { - f.e.e.EncodeBool(rv.Bool()) -} - -func (f *encFnInfo) kString(rv reflect.Value) { - f.e.e.EncodeString(c_UTF8, rv.String()) -} - -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.e.e.EncodeFloat64(rv.Float()) -} - -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.e.e.EncodeFloat32(float32(rv.Float())) -} - -func (f *encFnInfo) kInt(rv reflect.Value) { - f.e.e.EncodeInt(rv.Int()) -} - -func (f *encFnInfo) kUint(rv reflect.Value) { - f.e.e.EncodeUint(rv.Uint()) -} - -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.e.e.EncodeNil() -} - -func (f *encFnInfo) kErr(rv reflect.Value) { - f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { - ti := f.ti - // array 
may be non-addressable, so we have to manage with care - // (don't call rv.Bytes, rv.Slice, etc). - // E.g. type struct S{B [2]byte}; - // Encode(S{}) will bomb on "panic: slice of unaddressable array". - e := f.e - if f.seq != seqTypeArray { - if rv.IsNil() { - e.e.EncodeNil() - return - } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if ti.rtid == uint8SliceTypId { - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - return - } - } - cr := e.cr - rtelem := ti.rt.Elem() - l := rv.Len() - if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { - switch f.seq { - case seqTypeArray: - // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else - if rv.CanAddr() { - e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) - } else { - var bs []byte - if l <= cap(e.b) { - bs = e.b[:l] - } else { - bs = make([]byte, l) - } - reflect.Copy(reflect.ValueOf(bs), rv) - // TODO: Test that reflect.Copy works instead of manual one-by-one - // for i := 0; i < l; i++ { - // bs[i] = byte(rv.Index(i).Uint()) - // } - e.e.EncodeStringBytes(c_RAW, bs) - } - case seqTypeSlice: - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - case seqTypeChan: - bs := e.b[:0] - // do not use range, so that the number of elements encoded - // does not change, and encoding does not hang waiting on someone to close chan. - // for b := range rv.Interface().(<-chan byte) { - // bs = append(bs, b) - // } - ch := rv.Interface().(<-chan byte) - for i := 0; i < l; i++ { - bs = append(bs, <-ch) - } - e.e.EncodeStringBytes(c_RAW, bs) - } - return - } - - if ti.mbs { - if l%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", l) - return - } - e.e.EncodeMapStart(l / 2) - } else { - e.e.EncodeArrayStart(l) - } - - if l > 0 { - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - // if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - var fn *encFn - if rtelem.Kind() != reflect.Interface { - rtelemid := reflect.ValueOf(rtelem).Pointer() - fn = e.getEncFn(rtelemid, rtelem, true, true) - } - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - for j := 0; j < l; j++ { - if cr != nil { - if ti.mbs { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } else { - cr.sendContainerState(containerArrayElem) - } - } - if f.seq == seqTypeChan { - if rv2, ok2 := rv.Recv(); ok2 { - e.encodeValue(rv2, fn) - } else { - e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. - } - } else { - e.encodeValue(rv.Index(j), fn) - } - } - } - - if cr != nil { - if ti.mbs { - cr.sendContainerState(containerMapEnd) - } else { - cr.sendContainerState(containerArrayEnd) - } - } -} - -func (f *encFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - e := f.e - cr := e.cr - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - newlen := len(fti.sfi) - - // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of sync.Pool is less than the cost of new allocation. - pool, poolv, fkvs := encStructPoolGet(newlen) - - // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - } - newlen = 0 - var kv stringRv - recur := e.h.RecursiveEmptyCheck - for _, si := range tisfi { - kv.r = si.field(rv, false) - if toMap { - if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { - continue - } - kv.v = si.encName - } else { - // use the zero value. - // if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { - switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: - kv.r = reflect.Value{} //encode as nil - } - } - } - fkvs[newlen] = kv - newlen++ - } - - // debugf(">>>> kStruct: newlen: %v", newlen) - // sep := !e.be - ee := e.e //don't dereference every time - - if toMap { - ee.EncodeMapStart(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(kv.v) - } else { - ee.EncodeString(c_UTF8, kv.v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - } else { - ee.EncodeArrayStart(newlen) - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - } - - // do not use defer. Instead, use explicit pool return at end of function. - // defer has a cost we are trying to avoid. - // If there is a panic and these slices are not returned, it is ok. - if pool != nil { - pool.Put(poolv) - } -} - -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.e.e.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -// func (f *encFnInfo) kInterface(rv reflect.Value) { -// println("kInterface called") -// debug.PrintStack() -// if rv.IsNil() { -// f.e.e.EncodeNil() -// return -// } -// f.e.encodeValue(rv.Elem(), nil) -// } - -func (f *encFnInfo) kMap(rv reflect.Value) { - ee := f.e.e - if rv.IsNil() { - ee.EncodeNil() - return - } - - l := rv.Len() - ee.EncodeMapStart(l) - e := f.e - cr := e.cr - if l == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - var asSymbols bool - // determine the underlying key and val encFn's for the map. - // This eliminates some work which is done for each loop iteration i.e. - // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. - // - // However, if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. 
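The kMap body that continues below, together with kMapCanonical, is what backs the Canonical option documented in EncodeOptions earlier in this hunk: map keys are sorted by their natural order where one exists, otherwise by their encoded bytes, so the same value always produces the same output. A brief usage sketch; the CborHandle and the sample map are illustrative choices only:

package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := new(codec.CborHandle)
	h.Canonical = true // sort map keys so the output bytes are deterministic

	m := map[string]int{"b": 2, "a": 1, "c": 3}

	var first, second []byte
	if err := codec.NewEncoderBytes(&first, h).Encode(m); err != nil {
		panic(err)
	}
	if err := codec.NewEncoderBytes(&second, h).Encode(m); err != nil {
		panic(err)
	}
	// With Canonical set, repeated encodings of the same map should be byte-identical.
	fmt.Println(bytes.Equal(first, second))
}

Without Canonical, Go's randomized map iteration order means two encodings of the same map may legitimately differ byte for byte.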
- var keyFn, valFn *encFn - ti := f.ti - rtkey := ti.rt.Key() - rtval := ti.rt.Elem() - rtkeyid := reflect.ValueOf(rtkey).Pointer() - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - var keyTypeIsString = rtkeyid == stringTypId - if keyTypeIsString { - asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } else { - for rtkey.Kind() == reflect.Ptr { - rtkey = rtkey.Elem() - } - if rtkey.Kind() != reflect.Interface { - rtkeyid = reflect.ValueOf(rtkey).Pointer() - keyFn = e.getEncFn(rtkeyid, rtkey, true, true) - } - } - for rtval.Kind() == reflect.Ptr { - rtval = rtval.Elem() - } - if rtval.Kind() != reflect.Interface { - rtvalid := reflect.ValueOf(rtval).Pointer() - valFn = e.getEncFn(rtvalid, rtval, true, true) - } - mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { - - if e.h.Canonical { - e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) - } else { - for j := range mks { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if keyTypeIsString { - if asSymbols { - ee.EncodeSymbol(mks[j].String()) - } else { - ee.EncodeString(c_UTF8, mks[j].String()) - } - } else { - e.encodeValue(mks[j], keyFn) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mks[j]), valFn) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { - ee := e.e - cr := e.cr - // we previously did out-of-band if an extension was registered. - // This is not necessary, as the natural kind is sufficient for ordering. - - if rtkeyid == uint8SliceTypId { - mksv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bytes() - } - sort.Sort(bytesRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeStringBytes(c_RAW, mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - } else { - switch rtkey.Kind() { - case reflect.Bool: - mksv := make([]boolRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bool() - } - sort.Sort(boolRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.String: - mksv := make([]stringRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.String() - } - sort.Sort(stringRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(mksv[i].v) - } else { - ee.EncodeString(c_UTF8, mksv[i].v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uintRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Uint() - } - sort.Sort(uintRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := make([]intRv, 
len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Int() - } - sort.Sort(intRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Float32: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(mksv[i].v)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Float64: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - default: - // out-of-band - // first encode each key to a []byte first, then sort them, then record - var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - mksbv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksbv[i] - l := len(mksv) - e2.MustEncode(k) - v.r = k - v.v = mksv[l:] - // fmt.Printf(">>>>> %s\n", mksv[l:]) - } - sort.Sort(bytesRvSlice(mksbv)) - for j := range mksbv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(mksbv[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksbv[j].r), valFn) - } - } - } -} - -// -------------------------------------------------- - -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i encFnInfo - f func(*encFnInfo, reflect.Value) -} - -// -------------------------------------------------- - -type encRtidFn struct { - rtid uintptr - fn encFn -} - -// An Encoder writes an object to an output stream in the codec format. -type Encoder struct { - // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder - e encDriver - // NOTE: Encoder shouldn't call it's write methods, - // as the handler MAY need to do some coordination. - w encWriter - s []encRtidFn - ci set - be bool // is binary encoding - js bool // is json handle - - wi ioEncWriter - wb bytesEncWriter - - h *BasicHandle - hh Handle - - cr containerStateRecv - as encDriverAsis - - f map[uintptr]*encFn - b [scratchByteArrayLen]byte -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - e := newEncoder(h) - e.Reset(w) - return e -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. 
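The constructor documented above pairs with NewEncoder and a format Handle, and the Encode doc comment further below describes the "codec" struct-tag conventions it honors. A minimal round-trip sketch under those conventions, where MsgpackHandle and the user type are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type user struct {
	_struct bool   `codec:",omitempty"`       // set omitempty for every field
	Name    string `codec:"name"`             // use key "name" in the stream
	Email   string `codec:"email,omitempty"`  // dropped from the stream when empty
	Age     int    `codec:"-"`                // never encoded
}

func main() {
	h := new(codec.MsgpackHandle)

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, h).Encode(user{Name: "gopher"}); err != nil {
		panic(err)
	}

	var back user
	if err := codec.NewDecoderBytes(buf, h).Decode(&back); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", back)
}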
-func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - e := newEncoder(h) - e.ResetBytes(out) - return e -} - -func newEncoder(h Handle) *Encoder { - e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - _, e.js = h.(*JsonHandle) - e.e = h.newEncDriver(e) - e.as, _ = e.e.(encDriverAsis) - e.cr, _ = e.e.(containerStateRecv) - return e -} - -// Reset the Encoder with a new output stream. -// -// This accommodates using the state of the Encoder, -// where it has "cached" information about sub-engines. -func (e *Encoder) Reset(w io.Writer) { - ww, ok := w.(ioEncWriterWriter) - if ok { - e.wi.w = ww - } else { - sww := &e.wi.s - sww.w = w - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - e.wi.w = sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) - } - e.w = &e.wi - e.e.reset() -} - -func (e *Encoder) ResetBytes(out *[]byte) { - in := *out - if in == nil { - in = make([]byte, defEncByteBufSize) - } - e.wb.b, e.wb.out, e.wb.c = in, out, 0 - e.w = &e.wb - e.e.reset() -} - -// func (e *Encoder) sendContainerState(c containerState) { -// if e.cr != nil { -// e.cr.sendContainerState(c) -// } -// } - -// Encode writes an object into a stream. -// -// Encoding can be configured via the struct tag for the fields. -// The "codec" key in struct field's tag value is the key name, -// followed by an optional comma and options. -// Note that the "json" key is used in the absence of the "codec" key. -// -// To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. -// -// Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's tag is "-", OR -// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. -// -// When encoding as a map, the first string in the tag (before the comma) -// is the map key string to use when encoding. -// -// However, struct values may encode as arrays. This happens when: -// - StructToArray Encode option is set, OR -// - the tag on the _struct field sets the "toarray" option -// -// Values with types that implement MapBySlice are encoded as stream maps. -// -// The empty values (for omitempty option) are false, 0, any nil pointer -// or interface value, and any array, slice, map, or string of length zero. -// -// Anonymous fields are encoded inline except: -// - the struct tag specifies a replacement name (first value) -// - the field is of an interface type -// -// Examples: -// -// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. -// type MyStruct struct { -// _struct bool `codec:",omitempty"` //set omitempty for every field -// Field1 string `codec:"-"` //skip this field -// Field2 int `codec:"myName"` //Use key "myName" in encode stream -// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. -// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// io.Reader //use key "Reader". -// MyStruct `codec:"my1" //use key "my1". -// MyStruct //inline it -// ... -// } -// -// type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array -// } -// -// The mode of encoding is based on the type of the value. 
When a value is seen: -// - If a Selfer, call its CodecEncodeSelf method -// - If an extension is registered for it, call that extension function -// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. -func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() - return -} - -// MustEncode is like Encode, but panics if unable to Encode. -// This provides insight to the code location that triggered the error. -func (e *Encoder) MustEncode(v interface{}) { - e.encode(v) - e.w.atEndOfEncode() -} - -func (e *Encoder) encode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecEncodeSelf(e) - // return - // } - - switch v := iv.(type) { - case nil: - e.e.EncodeNil() - case Selfer: - v.CodecEncodeSelf(e) - case Raw: - e.raw(v) - case reflect.Value: - e.encodeValue(v, nil) - - case string: - e.e.EncodeString(c_UTF8, v) - case bool: - e.e.EncodeBool(v) - case int: - e.e.EncodeInt(int64(v)) - case int8: - e.e.EncodeInt(int64(v)) - case int16: - e.e.EncodeInt(int64(v)) - case int32: - e.e.EncodeInt(int64(v)) - case int64: - e.e.EncodeInt(v) - case uint: - e.e.EncodeUint(uint64(v)) - case uint8: - e.e.EncodeUint(uint64(v)) - case uint16: - e.e.EncodeUint(uint64(v)) - case uint32: - e.e.EncodeUint(uint64(v)) - case uint64: - e.e.EncodeUint(v) - case float32: - e.e.EncodeFloat32(v) - case float64: - e.e.EncodeFloat64(v) - - case []uint8: - e.e.EncodeStringBytes(c_RAW, v) - - case *string: - e.e.EncodeString(c_UTF8, *v) - case *bool: - e.e.EncodeBool(*v) - case *int: - e.e.EncodeInt(int64(*v)) - case *int8: - e.e.EncodeInt(int64(*v)) - case *int16: - e.e.EncodeInt(int64(*v)) - case *int32: - e.e.EncodeInt(int64(*v)) - case *int64: - e.e.EncodeInt(*v) - case *uint: - e.e.EncodeUint(uint64(*v)) - case *uint8: - e.e.EncodeUint(uint64(*v)) - case *uint16: - e.e.EncodeUint(uint64(*v)) - case *uint32: - e.e.EncodeUint(uint64(*v)) - case *uint64: - e.e.EncodeUint(*v) - case *float32: - e.e.EncodeFloat32(*v) - case *float64: - e.e.EncodeFloat64(*v) - - case *[]uint8: - e.e.EncodeStringBytes(c_RAW, *v) - - default: - const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer - if !fastpathEncodeTypeSwitch(iv, e) { - e.encodeI(iv, false, checkCodecSelfer1) - } - } -} - -func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { - // use a goto statement instead of a recursive function for ptr/interface. -TOP: - switch rv.Kind() { - case reflect.Ptr: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { - // TODO: Movable pointers will be an issue here. Future problem. 
- sptr = rv.UnsafeAddr() - break TOP - } - goto TOP - case reflect.Interface: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - goto TOP - case reflect.Slice, reflect.Map: - if rv.IsNil() { - e.e.EncodeNil() - return - } - case reflect.Invalid, reflect.Func: - e.e.EncodeNil() - return - } - - proceed = true - rv2 = rv - return -} - -func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, - checkFastpath, checkCodecSelfer bool) { - if sptr != 0 { - if (&e.ci).add(sptr) { - e.errorf("circular reference found: # %d", sptr) - } - } - if fn == nil { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - // fn = e.getEncFn(rtid, rt, true, true) - fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) - } - fn.f(&fn.i, rv) - if sptr != 0 { - (&e.ci).remove(sptr) - } -} - -func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { - if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { - e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) - } -} - -func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { - // if a valid fn is passed, it MUST BE for the dereferenced type of rv - if rv, sptr, proceed := e.preEncodeValue(rv); proceed { - e.doEncodeValue(rv, fn, sptr, true, true) - } -} - -func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { - // rtid := reflect.ValueOf(rt).Pointer() - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i := range e.s { - v := &(e.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true - break - } - } - } - if ok { - return - } - - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]*encFn, initCollectionCap) - } - fn = new(encFn) - e.f[rtid] = fn - } else { - if e.s == nil { - e.s = make([]encRtidFn, 0, initCollectionCap) - } - e.s = append(e.s, encRtidFn{rtid: rtid}) - fn = &(e.s[len(e.s)-1]).fn - } - - ti := e.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.e = e - fi.ti = ti - - if checkCodecSelfer && ti.cs { - fn.f = (*encFnInfo).selferMarshal - } else if rtid == rawTypId { - fn.f = (*encFnInfo).raw - } else if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.IsBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfFn := e.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*encFnInfo).ext - } else if supportMarshalInterfaces && e.be && ti.bm { - fn.f = (*encFnInfo).binaryMarshal - } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { - //If JSON, we should check JSONMarshal before textMarshal - fn.f = (*encFnInfo).jsonMarshal - } else if supportMarshalInterfaces && !e.be && ti.tm { - fn.f = (*encFnInfo).textMarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { // un-named slice or map - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].encfn - } - } else { - ok = false - // use mapping for underlying type if there - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].encfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *encFnInfo, xrv reflect.Value) { - xfnf(xf, xrv.Convert(xrt)) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case 
reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*encFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*encFnInfo).kSlice - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // reflect.Ptr and reflect.Interface are handled already by preEncodeValue - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - // case reflect.Interface: - // fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - } - - return -} - -func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - e.e.EncodeNil() - } else if asis { - e.asis(bs) - } else { - e.e.EncodeStringBytes(c, bs) - } -} - -func (e *Encoder) asis(v []byte) { - if e.as == nil { - e.w.writeb(v) - } else { - e.as.EncodeAsis(v) - } -} - -func (e *Encoder) raw(vv Raw) { - v := []byte(vv) - if !e.h.Raw { - e.errorf("Raw values cannot be encoded: %v", v) - } - if e.as == nil { - e.w.writeb(v) - } else { - e.as.EncodeAsis(v) - } -} - -func (e *Encoder) errorf(format string, params ...interface{}) { - err := fmt.Errorf(format, params...) - panic(err) -} - -// ---------------------------------------- - -const encStructPoolLen = 5 - -// encStructPool is an array of sync.Pool. -// Each element of the array pools one of encStructPool(8|16|32|64). -// It allows the re-use of slices up to 64 in length. -// A performance cost of encoding structs was collecting -// which values were empty and should be omitted. -// We needed slices of reflect.Value and string to collect them. -// This shared pool reduces the amount of unnecessary creation we do. -// The cost is that of locking sometimes, but sync.Pool is efficient -// enough to reduce thread contention. -var encStructPool [encStructPoolLen]sync.Pool - -func init() { - encStructPool[0].New = func() interface{} { return new([8]stringRv) } - encStructPool[1].New = func() interface{} { return new([16]stringRv) } - encStructPool[2].New = func() interface{} { return new([32]stringRv) } - encStructPool[3].New = func() interface{} { return new([64]stringRv) } - encStructPool[4].New = func() interface{} { return new([128]stringRv) } -} - -func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { - // if encStructPoolLen != 5 { // constant chec, so removed at build time. 
- // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed - // } - // idxpool := newlen / 8 - if newlen <= 8 { - p = &encStructPool[0] - v = p.Get() - s = v.(*[8]stringRv)[:newlen] - } else if newlen <= 16 { - p = &encStructPool[1] - v = p.Get() - s = v.(*[16]stringRv)[:newlen] - } else if newlen <= 32 { - p = &encStructPool[2] - v = p.Get() - s = v.(*[32]stringRv)[:newlen] - } else if newlen <= 64 { - p = &encStructPool[3] - v = p.Get() - s = v.(*[64]stringRv)[:newlen] - } else if newlen <= 128 { - p = &encStructPool[4] - v = p.Get() - s = v.(*[128]stringRv)[:newlen] - } else { - s = make([]stringRv, newlen) - } - return -} - -// ---------------------------------------- - -// func encErr(format string, params ...interface{}) { -// doPanic(msgTagEnc, format, params...) -// } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go deleted file mode 100644 index f2e5d2dcf..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ /dev/null @@ -1,39352 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
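The header comment above closes with the unaddressable-value case; spelled out as a runnable sketch (JsonHandle and the input literal are illustrative choices), the map sitting inside an interface{} element cannot be addressed, so the generated fast-path decode has to work with it rather than take its address:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := new(codec.JsonHandle)

	// m2 lives inside an interface{} slot of p2, so it is not addressable;
	// the decoder must handle it without taking its address.
	m2 := map[string]int{}
	p2 := []interface{}{m2}

	if err := codec.NewDecoderBytes([]byte(`[{"a": 1, "b": 2}]`), h).Decode(&p2); err != nil {
		panic(err)
	}
	fmt.Println(p2) // e.g. [map[a:1 b:2]]
}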
-// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct{} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [271]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, 271 // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < 271 && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR) - fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR) - fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R) - fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R) - fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR) - fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) - fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) - fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) - fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) - fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) - fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) - fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) - fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) - fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) - fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) - - fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) - fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) - fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) - fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) - fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) - fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) - fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) - 
fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) - fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) - fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) - fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) - fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) - fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) - fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) - fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) - fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) - fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) - fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) - fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) - fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) - fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) - fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) - fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) - fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) - fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) - fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) - fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R) - fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) - fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) - fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) - fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) - fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) - fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) - fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) - fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) - fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) - fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) - fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, 
(*decFnInfo).fastpathDecMapFloat32Uint64R)
[... remaining deleted fn(map[K]V(nil), (*encFnInfo).fastpathEncMap<K><V>R, (*decFnInfo).fastpathDecMap<K><V>R) registrations, one per primitive key/value combination: keys float32, float64, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64 and bool, each paired with values interface{}, string, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, float32, float64 and bool ...]
-
-	sort.Sort(fastpathAslice(fastpathAV[:]))
-}
-
-// -- encode
-
-// -- -- fast path type switch
-func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
-	switch v := iv.(type) {
-
-	case []interface{}:
-		fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
-	case *[]interface{}:
-		fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
[... analogous deleted case pairs (value and pointer) for every remaining fast-path type, each dispatching to its fastpathTV.Enc<Type>V(v, fastpathCheckNilTrue, e) helper (the pointer case calls the same helper on *v): map[interface{}]V; []string and map[string]V; []float32 and map[float32]V; []float64 and map[float64]V; []uint and map[uint]V; map[uint8]V; []uint16 and map[uint16]V; []uint32 and map[uint32]V; []uint64 and map[uint64]V; []uintptr and map[uintptr]V; []int and map[int]V; []int8 and map[int8]V; []int16 and map[int16]V; []int32 and map[int32]V; []int64 and map[int64]V, for every primitive value type V, down to ...]
-	case map[int64]float32:
-		fastpathTV.EncMapInt64Float32V(v,
fastpathCheckNilTrue, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) - - case []bool: - fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e) - case *[]bool: - fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) - - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) - case *map[bool]int16: - fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { - - case 
[]interface{}: - fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) - case *[]interface{}: - fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) - - case []string: - fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) - case *[]string: - fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) - - case []float32: - fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) - case *[]float32: - fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) - - case []float64: - fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) - case *[]float64: - fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) - - case []uint: - fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e) - case *[]uint: - fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e) - - case []uint16: - fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e) - case *[]uint16: - fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e) - - case []uint32: - fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e) - case *[]uint32: - fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e) - - case []uint64: - fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e) - case *[]uint64: - fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e) - - case []uintptr: - fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e) - case *[]uintptr: - fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e) - - case []int: - fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e) - case *[]int: - fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e) - - case []int8: - fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e) - case *[]int8: - fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e) - - case []int16: - fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e) - case *[]int16: - fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e) - - case []int32: - fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e) - case *[]int32: - fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e) - - case []int64: - fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e) - case *[]int64: - fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e) - - case []bool: - fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e) - case *[]bool: - fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { - - case map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) - case *map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]string: - fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) - case *map[interface{}]string: - fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint: - fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint: - fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint32: - 
fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) - case *map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int: - fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) - case *map[interface{}]int: - fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) - case *map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) - - case map[string]interface{}: - fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) - case *map[string]interface{}: - fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) - - case map[string]string: - fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) - case *map[string]string: - fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) - - case map[string]uint: - fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) - case *map[string]uint: - fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) - - case map[string]uint8: - fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) - case *map[string]uint8: - fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) - - case map[string]uint16: - fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) - case *map[string]uint16: - fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) - - case map[string]uint32: - fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) - case *map[string]uint32: - fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) - - case map[string]uint64: - fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) - case *map[string]uint64: - fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) - - case map[string]uintptr: - fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) - case *map[string]uintptr: - fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) - - case map[string]int: - fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) - case *map[string]int: - fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) - - case map[string]int8: - fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) - case 
*map[string]int8: - fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) - - case map[string]int16: - fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) - case *map[string]int16: - fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) - - case map[string]int32: - fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) - case *map[string]int32: - fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) - - case map[string]int64: - fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) - case *map[string]int64: - fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) - - case map[string]float32: - fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) - case *map[string]float32: - fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) - - case map[string]float64: - fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) - case *map[string]float64: - fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) - - case map[string]bool: - fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) - case *map[string]bool: - fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) - - case map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) - case *map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) - - case map[float32]string: - fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) - case *map[float32]string: - fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) - - case map[float32]uint: - fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) - case *map[float32]uint: - fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) - - case map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) - case *map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) - case *map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) - case *map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) - case *map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) - case *map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float32]int: - fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) - case *map[float32]int: - fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) - - case map[float32]int8: - fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) - case *map[float32]int8: - fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) - - case map[float32]int16: - fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) - case *map[float32]int16: - fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) - - case map[float32]int32: - fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) - case *map[float32]int32: - fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) - - case map[float32]int64: - fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) - case *map[float32]int64: - fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) - - case map[float32]float32: - fastpathTV.EncMapFloat32Float32V(v, 
fastpathCheckNilTrue, e) - case *map[float32]float32: - fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) - - case map[float32]float64: - fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) - case *map[float32]float64: - fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) - - case map[float32]bool: - fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) - case *map[float32]bool: - fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) - - case map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) - case *map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) - - case map[float64]string: - fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) - case *map[float64]string: - fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) - - case map[float64]uint: - fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) - case *map[float64]uint: - fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) - - case map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) - case *map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) - case *map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) - case *map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) - case *map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) - case *map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float64]int: - fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) - case *map[float64]int: - fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) - - case map[float64]int8: - fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) - case *map[float64]int8: - fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) - - case map[float64]int16: - fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) - case *map[float64]int16: - fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) - - case map[float64]int32: - fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) - case *map[float64]int32: - fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) - - case map[float64]int64: - fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) - case *map[float64]int64: - fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) - - case map[float64]float32: - fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) - case *map[float64]float32: - fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) - - case map[float64]float64: - fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) - case *map[float64]float64: - fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) - - case map[float64]bool: - fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) - case *map[float64]bool: - fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint]interface{}: - fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) - case *map[uint]interface{}: - fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) - - case 
map[uint]string: - fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) - case *map[uint]string: - fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint: - fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) - case *map[uint]uint: - fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint8: - fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) - case *map[uint]uint8: - fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint16: - fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) - case *map[uint]uint16: - fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint32: - fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) - case *map[uint]uint32: - fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint64: - fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) - case *map[uint]uint64: - fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) - - case map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) - case *map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint]int: - fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) - case *map[uint]int: - fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) - - case map[uint]int8: - fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) - case *map[uint]int8: - fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) - - case map[uint]int16: - fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) - case *map[uint]int16: - fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) - - case map[uint]int32: - fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) - case *map[uint]int32: - fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) - - case map[uint]int64: - fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) - case *map[uint]int64: - fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) - - case map[uint]float32: - fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) - case *map[uint]float32: - fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uint]float64: - fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) - case *map[uint]float64: - fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uint]bool: - fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) - case *map[uint]bool: - fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) - - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) - case *map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) - case *map[uint8]string: - fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint: - fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) - case *map[uint8]uint: - fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) - - case 
map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) - case *map[uint8]int: - fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int8: - fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) - case *map[uint8]int8: - fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int16: - fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) - case *map[uint8]int16: - fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) - case *map[uint8]int32: - fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int64: - fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) - case *map[uint8]int64: - fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]float32: - fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) - case *map[uint8]float32: - fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) - case *map[uint8]float64: - fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) - case *map[uint8]bool: - fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) - case *map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint16]string: - fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) - case *map[uint16]string: - fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint: - fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) - case *map[uint16]uint: - fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int: - fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) - case *map[uint16]int: - fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int8: - fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) - case *map[uint16]int8: - fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int16: - fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) - case 
*map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) - case *map[uint16]float64: - fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) - case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint32]string: - fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) - case *map[uint32]float64: - 
fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint: - 
fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) - - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) - - case map[int]string: - fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) - - case map[int]uint: - fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) - - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) - - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) - - case map[int]uint32: - fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) - - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) - - 
case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) - - case map[int]int: - fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) - - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) - - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) - - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) - case *map[int]int32: - fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) - - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) - - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) - - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) - - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) - - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) - - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) - case *map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) - - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) - - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) - - case map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) - - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) 
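// Illustrative sketch only (hypothetical name: encSliceAsMap); not part of this patch.
// The EncAsMapSlice*V helpers removed further down encode an even-length slice as
// alternating key/value pairs ("mapBySlice") and reject odd lengths, as in the
// len(v)%2 == 1 check in EncAsMapSliceIntfV. A minimal version of that pairing:
package main

import (
	"errors"
	"fmt"
)

// encSliceAsMap pairs v[0],v[1] / v[2],v[3] / ... into a map.
func encSliceAsMap(v []interface{}) (map[interface{}]interface{}, error) {
	if len(v)%2 == 1 {
		return nil, errors.New("mapBySlice requires even slice length")
	}
	m := make(map[interface{}]interface{}, len(v)/2)
	for i := 0; i < len(v); i += 2 {
		m[v[i]] = v[i+1] // even index = key, odd index = value
	}
	return m, nil
}

func main() {
	m, err := encSliceAsMap([]interface{}{"a", 1, "b", 2})
	fmt.Println(m, err) // map[a:1 b:2] <nil>
}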
- case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) - - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) - - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) - - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) - - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) - - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) - case *map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) - - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) - case *map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) - - case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) - - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) - - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) - - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) - - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) - - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) - - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) - - case map[int32]string: - 
fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint: - fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) - case *map[int32]int: - fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) - - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) - - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) - - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) - - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) - - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) - - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) - case *map[int32]float64: - fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) - - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) - - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) - - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, 
fastpathCheckNilTrue, e) - - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) - case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) - - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) - - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) - - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) - - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) - case *map[int64]int64: - fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) - - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) - - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) - case *map[bool]int16: - 
fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions - -func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeString(c_UTF8, v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeString(c_UTF8, v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceFloat32R(rv 
reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeFloat32(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeFloat32(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeFloat64(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeFloat64(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - 
cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - 
cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for 
j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeBool(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeBool(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { - fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { - fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else 
{ - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { - fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { - fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { - fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } 
- e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) { - fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) { - fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) { - fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) { - fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) { - fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) { - fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr 
!= nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) { - fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) { - fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) { - fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) { - fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) { - fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) { - fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - 
ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) { - fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) { - fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) { - fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - 
ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) { - fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) { - fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) { - fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { 
- cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) { - fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) { - fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) { - fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - 
if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) { - fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) { - fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) { - fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - 
sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) { - fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) { - fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) { - fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := 
range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) { - fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) { - fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) { - fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - 
[Diff hunk, deletion lines with line breaks lost in extraction: removal of generated fast-path map encoders from a vendored codec package (likely github.com/ugorji/go/codec's fast-path.generated.go). The deleted functions visible in this span are the EncMapFloat32*V, EncMapFloat64*V, EncMapUint*V, EncMapUint8*V and EncMapUint16*V specializations for every value type (interface{}, string, uint/uint8/uint16/uint32/uint64, uintptr, int/int8/int16/int32/int64, float32/float64, bool), together with their encFnInfo fastpathEncMap*R wrappers. Each follows the same generated template: optional nil check via EncodeNil, EncodeMapStart, a canonical-order branch that copies and sorts the keys when e.h.Canonical is set, then per-entry containerMapKey/containerMapValue container states around the key and value encode calls, and a closing containerMapEnd.]
ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { - fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { - fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { - fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { - fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { - fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) { - fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { - fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e 
*Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) { - fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) { - 
fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) { - fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) { - fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) { - fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { 
- if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) { - fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) { - fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) { - fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) { - fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) { - fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) { - fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, 
len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) { - fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) { - fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) { - fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && 
v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) { - fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) { - fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) { - fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint64(k2)]) - } - } else { 
- for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) { - fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) { - fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) { - fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) { - fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrStringV(v 
map[uintptr]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) { - fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) { - fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) { - fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) 
[Elided hunk: removal of the machine-generated fastpath map encoders. The deleted block continues the uintptr-keyed group begun above and runs through the start of the int64-keyed group, covering the (f *encFnInfo) fastpathEncMap<Key><Value>R(rv reflect.Value) wrappers and their (_ fastpathT) EncMap<Key><Value>V methods for maps keyed by uintptr, int, int8, int16, int32, and int64 with interface{}, string, uint/uint8/uint16/uint32/uint64, uintptr, int/int8/int16/int32/int64, float32, float64, and bool values. Every deleted function follows the same template: encode nil when checkNil is set and the map is nil; call ee.EncodeMapStart(len(v)); if e.h.Canonical, copy the keys into a slice, sort them (sort.Sort on uintSlice/intSlice), and emit each key/value pair in sorted key order, otherwise range over the map directly; and, when cr is non-nil, surround each key, each value, and the close of the map with cr.sendContainerState(containerMapKey/containerMapValue/containerMapEnd).]
nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { - fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return 
- } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { - fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { - fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { - fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { - fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { - fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() 
- return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) { - fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) { - fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) { - fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } 
- ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) { - fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) { - fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) { - fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) { - fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - 
v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) { - fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) { - fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) { - fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) { - fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) { - fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) { - fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) { - fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - 
sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) { - fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) { - fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) { - fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) 
fastpathEncMapBoolFloat64R(rv reflect.Value) { - fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) { - fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := iv.(type) { - - case []interface{}: - fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d) - case *[]interface{}: - v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]interface{}: - fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]interface{}: - v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]string: - fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]string: - v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint: - fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint: - v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint8: - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint8: - v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint16: - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint16: - v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case 
map[interface{}]uint32: - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint32: - v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint64: - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint64: - v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uintptr: - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uintptr: - v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int: - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int: - v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int8: - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int8: - v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int16: - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int16: - v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int32: - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int32: - v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int64: - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int64: - v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]float32: - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]float32: - v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]float64: - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]float64: - v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]bool: - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]bool: - v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []string: - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) - case *[]string: - v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]interface{}: - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) - case *map[string]interface{}: - v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]string: - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) - case *map[string]string: - v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint: - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) - case *map[string]uint: - v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, 
d) - if changed2 { - *v = v2 - } - - case map[string]uint8: - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint8: - v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint16: - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint16: - v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint32: - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint32: - v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint64: - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint64: - v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uintptr: - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[string]uintptr: - v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int: - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) - case *map[string]int: - v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int8: - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) - case *map[string]int8: - v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int16: - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) - case *map[string]int16: - v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int32: - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) - case *map[string]int32: - v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int64: - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) - case *map[string]int64: - v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]float32: - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[string]float32: - v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]float64: - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[string]float64: - v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]bool: - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) - case *map[string]bool: - v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []float32: - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) - case *[]float32: - v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]interface{}: - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[float32]interface{}: - v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) - if 
changed2 { - *v = v2 - } - - case map[float32]string: - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) - case *map[float32]string: - v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint: - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint: - v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint8: - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint8: - v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint16: - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint16: - v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint32: - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint32: - v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint64: - fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint64: - v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uintptr: - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[float32]uintptr: - v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int: - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) - case *map[float32]int: - v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int8: - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int8: - v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int16: - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int16: - v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int32: - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int32: - v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int64: - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int64: - v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]float32: - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]float32: - v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]float64: - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]float64: - v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]bool: - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[float32]bool: - v2, changed2 := 
fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []float64: - fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) - case *[]float64: - v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]interface{}: - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[float64]interface{}: - v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]string: - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) - case *map[float64]string: - v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint: - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint: - v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint8: - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint8: - v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint16: - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint16: - v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint32: - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint32: - v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint64: - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint64: - v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uintptr: - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[float64]uintptr: - v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int: - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) - case *map[float64]int: - v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int8: - fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int8: - v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int16: - fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int16: - v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int32: - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int32: - v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int64: - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int64: - v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]float32: - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) - case 
*map[float64]float32: - v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]float64: - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]float64: - v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]bool: - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[float64]bool: - v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint: - fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) - case *[]uint: - v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]interface{}: - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint]interface{}: - v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]string: - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) - case *map[uint]string: - v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint: - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint: - v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint8: - fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint8: - v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint16: - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint16: - v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint32: - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint32: - v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint64: - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint64: - v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uintptr: - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint]uintptr: - v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int: - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) - case *map[uint]int: - v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int8: - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int8: - v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int16: - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int16: - v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int32: - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int32: - v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { 
- *v = v2 - } - - case map[uint]int64: - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int64: - v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]float32: - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]float32: - v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]float64: - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]float64: - v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]bool: - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint]bool: - v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]interface{}: - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]interface{}: - v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]string: - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]string: - v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint: - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint: - v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint8: - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint8: - v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint16: - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint16: - v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint32: - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint32: - v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint64: - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint64: - v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uintptr: - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uintptr: - v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int: - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int: - v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int8: - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int8: - v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int16: - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int16: - v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int32: - fastpathTV.DecMapUint8Int32V(v, 
fastpathCheckNilFalse, false, d) - case *map[uint8]int32: - v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int64: - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int64: - v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]float32: - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]float32: - v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]float64: - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]float64: - v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]bool: - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]bool: - v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint16: - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) - case *[]uint16: - v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]interface{}: - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]interface{}: - v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]string: - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]string: - v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint: - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint: - v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint8: - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint8: - v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint16: - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint16: - v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint32: - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint32: - v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint64: - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint64: - v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uintptr: - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uintptr: - v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int: - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int: - v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int8: - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) - 
case *map[uint16]int8: - v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int16: - fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int16: - v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int32: - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int32: - v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int64: - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int64: - v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]float32: - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]float32: - v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]float64: - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]float64: - v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]bool: - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]bool: - v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint32: - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d) - case *[]uint32: - v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]interface{}: - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]interface{}: - v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]string: - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]string: - v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint: - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint: - v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint8: - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint8: - v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint16: - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint16: - v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint32: - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint32: - v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint64: - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint64: - v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uintptr: - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) - case 
*map[uint32]uintptr: - v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int: - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int: - v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int8: - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int8: - v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int16: - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int16: - v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int32: - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int32: - v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int64: - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int64: - v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]float32: - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]float32: - v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]float64: - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]float64: - v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]bool: - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]bool: - v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint64: - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) - case *[]uint64: - v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]interface{}: - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]interface{}: - v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]string: - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]string: - v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint: - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint: - v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint8: - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint8: - v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint16: - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint16: - v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint32: - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint32: - v2, 
changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint64: - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint64: - v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uintptr: - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uintptr: - v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int: - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int: - v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int8: - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int8: - v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int16: - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int16: - v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int32: - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int32: - v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int64: - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int64: - v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]float32: - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]float32: - v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]float64: - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]float64: - v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]bool: - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]bool: - v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uintptr: - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) - case *[]uintptr: - v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]interface{}: - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]interface{}: - v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]string: - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]string: - v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint: - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint: - v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint8: - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint8: - v2, changed2 
:= fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint16: - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint16: - v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint32: - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint32: - v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint64: - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint64: - v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uintptr: - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uintptr: - v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int: - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int: - v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int8: - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int8: - v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int16: - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int16: - v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int32: - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int32: - v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int64: - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int64: - v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]float32: - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]float32: - v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]float64: - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]float64: - v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]bool: - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]bool: - v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int: - fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d) - case *[]int: - v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]interface{}: - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d) - case *map[int]interface{}: - v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]string: - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d) - case *map[int]string: - v2, changed2 := 
fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint: - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d) - case *map[int]uint: - v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint8: - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint8: - v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint16: - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint16: - v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint32: - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint32: - v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint64: - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint64: - v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uintptr: - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int]uintptr: - v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int: - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d) - case *map[int]int: - v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int8: - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d) - case *map[int]int8: - v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int16: - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d) - case *map[int]int16: - v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int32: - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d) - case *map[int]int32: - v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int64: - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d) - case *map[int]int64: - v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]float32: - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[int]float32: - v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]float64: - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[int]float64: - v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]bool: - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d) - case *map[int]bool: - v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int8: - fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d) - case *[]int8: - v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]interface{}: - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d) - case 
*map[int8]interface{}: - v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]string: - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d) - case *map[int8]string: - v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint: - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint: - v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint8: - fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint8: - v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint16: - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint16: - v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint32: - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint32: - v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint64: - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint64: - v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uintptr: - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int8]uintptr: - v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int: - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d) - case *map[int8]int: - v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int8: - fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int8: - v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int16: - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int16: - v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int32: - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int32: - v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int64: - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int64: - v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]float32: - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int8]float32: - v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]float64: - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int8]float64: - v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]bool: - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int8]bool: - v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 
- } - - case []int16: - fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d) - case *[]int16: - v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]interface{}: - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int16]interface{}: - v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]string: - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d) - case *map[int16]string: - v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint: - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint: - v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint8: - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint8: - v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint16: - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint16: - v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint32: - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint32: - v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint64: - fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint64: - v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uintptr: - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int16]uintptr: - v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int: - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d) - case *map[int16]int: - v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int8: - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int8: - v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int16: - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int16: - v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int32: - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int32: - v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int64: - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int64: - v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]float32: - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int16]float32: - v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]float64: - fastpathTV.DecMapInt16Float64V(v, 
fastpathCheckNilFalse, false, d) - case *map[int16]float64: - v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]bool: - fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int16]bool: - v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int32: - fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d) - case *[]int32: - v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]interface{}: - fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int32]interface{}: - v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]string: - fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d) - case *map[int32]string: - v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint: - fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint: - v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint8: - fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint8: - v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint16: - fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint16: - v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint32: - fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint32: - v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint64: - fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint64: - v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uintptr: - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int32]uintptr: - v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int: - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d) - case *map[int32]int: - v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int8: - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int8: - v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int16: - fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int16: - v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int32: - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int32: - v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int64: - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int64: - v2, changed2 := 
fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]float32: - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int32]float32: - v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]float64: - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int32]float64: - v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]bool: - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int32]bool: - v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int64: - fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d) - case *[]int64: - v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]interface{}: - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int64]interface{}: - v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]string: - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d) - case *map[int64]string: - v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint: - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint: - v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint8: - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint8: - v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint16: - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint16: - v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint32: - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint32: - v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint64: - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint64: - v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uintptr: - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int64]uintptr: - v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int: - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d) - case *map[int64]int: - v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int8: - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int8: - v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int16: - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int16: - v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { 
- *v = v2 - } - - case map[int64]int32: - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int32: - v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int64: - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int64: - v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]float32: - fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int64]float32: - v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]float64: - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int64]float64: - v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]bool: - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int64]bool: - v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []bool: - fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d) - case *[]bool: - v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]interface{}: - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d) - case *map[bool]interface{}: - v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]string: - fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d) - case *map[bool]string: - v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint: - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint: - v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint8: - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint8: - v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint16: - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint16: - v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint32: - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint32: - v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint64: - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint64: - v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uintptr: - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[bool]uintptr: - v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int: - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d) - case *map[bool]int: - v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int8: - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d) - case 
*map[bool]int8: - v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int16: - fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int16: - v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int32: - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int32: - v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int64: - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int64: - v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]float32: - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[bool]float32: - v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]float64: - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[bool]float64: - v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]bool: - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d) - case *map[bool]bool: - v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions - -func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]interface{}) - v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]interface{}) - fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecSliceIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]interface{}, xlen) - } - } else { - v = make([]interface{}, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - d.decode(&v[j]) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, nil) - slh.ElemContainerState(j) - d.decode(&v[j]) - } - } else if !canChange { - for ; j < containerLenS; j++ { - 
slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]interface{}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, nil) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - d.decode(&v[j]) - - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]string) - v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]string) - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) { - v, changed := f.DecSliceStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]string, xlen) - } - } else { - v = make([]string, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeString() - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, "") - slh.ElemContainerState(j) - v[j] = dd.DecodeString() - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]string, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, "") - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeString() - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]float32) - v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, 
!array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]float32) - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float32, xlen) - } - } else { - v = make([]float32, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = float32(dd.DecodeFloat(true)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = float32(dd.DecodeFloat(true)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]float32, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = float32(dd.DecodeFloat(true)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]float64) - v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]float64) - fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen 
int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float64, xlen) - } - } else { - v = make([]float64, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeFloat(false) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeFloat(false) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]float64, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeFloat(false) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint) - v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint) - fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint, xlen) - } - } else { - v = make([]uint, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = 
make([]uint, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint(dd.DecodeUint(uintBitsize)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint16) - v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint16) - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint16, xlen) - } - } else { - v = make([]uint16, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint16(dd.DecodeUint(16)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint16(dd.DecodeUint(16)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uint16, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint16(dd.DecodeUint(16)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint32) - v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint32) - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) { - v, changed := 
f.DecSliceUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint32, xlen) - } - } else { - v = make([]uint32, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint32(dd.DecodeUint(32)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint32(dd.DecodeUint(32)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uint32, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint32(dd.DecodeUint(32)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint64) - v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint64) - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint64, xlen) - } - } else { - v = make([]uint64, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = 
len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeUint(64) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeUint(64) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uint64, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeUint(64) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uintptr) - v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uintptr) - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uintptr, xlen) - } - } else { - v = make([]uintptr, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uintptr, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = 
uintptr(dd.DecodeUint(uintBitsize)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int) - v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int) - fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) { - v, changed := f.DecSliceIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int, xlen) - } - } else { - v = make([]int, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int(dd.DecodeInt(intBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int(dd.DecodeInt(intBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int(dd.DecodeInt(intBitsize)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int8) - v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int8) - fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - 
slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int8, xlen) - } - } else { - v = make([]int8, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int8(dd.DecodeInt(8)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int8(dd.DecodeInt(8)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int8, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int8(dd.DecodeInt(8)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int16) - v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int16) - fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int16, xlen) - } - } else { - v = make([]int16, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int16(dd.DecodeInt(16)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int16(dd.DecodeInt(16)) - } - } else if 
!canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int16, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int16(dd.DecodeInt(16)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int32) - v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int32) - fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int32, xlen) - } - } else { - v = make([]int32, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int32(dd.DecodeInt(32)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int32(dd.DecodeInt(32)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int32, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int32(dd.DecodeInt(32)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int64) - v, changed := 
fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int64) - fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int64, xlen) - } - } else { - v = make([]int64, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeInt(64) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeInt(64) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int64, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeInt(64) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]bool) - v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]bool) - fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) { - v, changed := f.DecSliceBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, 
d.h.MaxInitLen, 1) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]bool, xlen) - } - } else { - v = make([]bool, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeBool() - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, false) - slh.ElemContainerState(j) - v[j] = dd.DecodeBool() - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]bool, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, false) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeBool() - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]interface{}) - v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]interface{}) - fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk interface{} - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]string) - v, changed := 
fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]string) - fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]string, xlen) - changed = true - } - - var mk interface{} - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint) - v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint) - fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint, xlen) - changed = true - } - - var mk interface{} - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - 
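// A minimal sketch of the shape shared by the generated map fast paths being
// removed here: a counted loop when the container announces its length, a
// break-terminated loop for indefinite-length streams, and an initial
// allocation that is capped (the decInferLen / MaxInitLen idea) rather than
// trusting the announced length. The kvStream interface and decodeStringIntMap
// below are hypothetical names for illustration only, not the codec package's
// real API.
package main

import "fmt"

// kvStream is a hypothetical stand-in for the wire decoder. A negative Len
// means "indefinite length: read entries until Break reports true".
type kvStream interface {
	Len() int
	Break() bool
	Key() string
	Value() int
}

// decodeStringIntMap mirrors the generated pattern: size the map from a
// capped hint, then run either the counted or the break-terminated loop.
func decodeStringIntMap(s kvStream, maxInit int) map[string]int {
	n := s.Len()
	hint := n
	if hint < 0 || hint > maxInit {
		hint = maxInit // never let the wire length force a huge allocation
	}
	m := make(map[string]int, hint)
	if n >= 0 {
		for j := 0; j < n; j++ {
			m[s.Key()] = s.Value()
		}
		return m
	}
	for !s.Break() {
		m[s.Key()] = s.Value()
	}
	return m
}

// sliceStream is a tiny in-memory kvStream used only to exercise the sketch.
type sliceStream struct {
	keys []string
	vals []int
	i    int
}

func (s *sliceStream) Len() int    { return -1 } // pretend indefinite length
func (s *sliceStream) Break() bool { return s.i >= len(s.keys) }
func (s *sliceStream) Key() string { return s.keys[s.i] }
func (s *sliceStream) Value() int  { v := s.vals[s.i]; s.i++; return v }

func main() {
	src := &sliceStream{keys: []string{"a", "b"}, vals: []int{1, 2}}
	fmt.Println(decodeStringIntMap(src, 1024)) // map[a:1 b:2]
}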
-func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint8) - v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint8) - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]uint8, xlen) - changed = true - } - - var mk interface{} - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint16) - v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint16) - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]uint16, xlen) - changed = true - } - - var mk interface{} - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint32) - v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint32) - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]uint32, xlen) - changed = true - } - - var mk interface{} - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint64) - v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint64) - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint64, xlen) - changed = true - } - - var mk interface{} - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; 
!dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uintptr) - v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uintptr) - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uintptr, xlen) - changed = true - } - - var mk interface{} - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int) - v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int) - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int, xlen) - changed = true - } - - var mk interface{} - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if 
cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int8) - v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int8) - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]int8, xlen) - changed = true - } - - var mk interface{} - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int16) - v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int16) - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]int16, xlen) - changed = true - } - - var mk interface{} - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if 
cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int32) - v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int32) - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]int32, xlen) - changed = true - } - - var mk interface{} - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int64) - v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int64) - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = 
make(map[interface{}]int64, xlen) - changed = true - } - - var mk interface{} - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float32) - v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]float32) - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]float32, xlen) - changed = true - } - - var mk interface{} - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float64) - v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]float64) - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && 
dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]float64, xlen) - changed = true - } - - var mk interface{} - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]bool) - v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]bool) - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]bool, xlen) - changed = true - } - - var mk interface{} - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]interface{}) - v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]interface{}) - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntfV(v 
map[string]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[string]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk string - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]string) - v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]string) - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, - d *Decoder) (_ map[string]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]string, xlen) - changed = true - } - - var mk string - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint) - v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint) - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint, xlen) - changed = true - } - - var mk string - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint8) - v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint8) - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]uint8, xlen) - changed = true - } - - var mk string - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint16) - v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint16) - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint16, changed 
bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]uint16, xlen) - changed = true - } - - var mk string - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint32) - v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint32) - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]uint32, xlen) - changed = true - } - - var mk string - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint64) - v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint64) - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = 
true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint64, xlen) - changed = true - } - - var mk string - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uintptr) - v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uintptr) - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uintptr, xlen) - changed = true - } - - var mk string - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int) - v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int) - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int, xlen) - changed = true - } - - var mk string - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int8) - v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int8) - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]int8, xlen) - changed = true - } - - var mk string - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int16) - v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int16) - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]int16, xlen) - changed = true - } - - var mk string - var mv int16 - if 
containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int32) - v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int32) - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]int32, xlen) - changed = true - } - - var mk string - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int64) - v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int64) - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int64, xlen) - changed = true - } - - var mk string - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float32) - v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]float32) - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]float32, xlen) - changed = true - } - - var mk string - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float64) - v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]float64) - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]float64, xlen) - changed = true - } - - var mk string - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv 
= dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]bool) - v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]bool) - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[string]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]bool, xlen) - changed = true - } - - var mk string - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]interface{}) - v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]interface{}) - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - 
d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]string) - v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]string) - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]string, xlen) - changed = true - } - - var mk float32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint) - v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint) - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint, xlen) - changed = true - } - - var mk float32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil 
{ - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint8) - v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint8) - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]uint8, xlen) - changed = true - } - - var mk float32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint16) - v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint16) - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]uint16, xlen) - changed = true - } - - var mk float32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if 
containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint32) - v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint32) - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]uint32, xlen) - changed = true - } - - var mk float32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint64) - v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint64) - fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint64, xlen) - changed = true - } - - var mk float32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; 
!dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uintptr) - v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uintptr) - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uintptr, xlen) - changed = true - } - - var mk float32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int) - v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int) - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int, xlen) - changed = true - } - - var mk float32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil 
{ - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int8) - v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int8) - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]int8, xlen) - changed = true - } - - var mk float32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int16) - v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int16) - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]int16, xlen) - changed = true - } - - var mk float32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int32) - v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int32) - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]int32, xlen) - changed = true - } - - var mk float32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int64) - v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int64) - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int64, xlen) - changed = true - } - - var mk float32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]float32) - v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]float32) - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]float32, xlen) - changed = true - } - - var mk float32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]float64) - v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]float64) - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]float64, xlen) - changed = true - } - - var mk float32 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]bool) - v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]bool) - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]bool, xlen) - changed = true - } - - var mk float32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]interface{}) - v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]interface{}) - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - 
if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]string) - v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]string) - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]string, xlen) - changed = true - } - - var mk float64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint) - v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint) - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint, xlen) - changed = true - } - - var mk float64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - 
} - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint8) - v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint8) - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]uint8, xlen) - changed = true - } - - var mk float64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint16) - v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint16) - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]uint16, xlen) - changed = true - } - - var mk float64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } 
- } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint32) - v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint32) - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]uint32, xlen) - changed = true - } - - var mk float64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint64) - v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint64) - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint64, xlen) - changed = true - } - - var mk float64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return 
v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uintptr) - v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uintptr) - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uintptr, xlen) - changed = true - } - - var mk float64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int) - v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int) - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int, xlen) - changed = true - } - - var mk float64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapFloat64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int8) - v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int8) - fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]int8, xlen) - changed = true - } - - var mk float64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int16) - v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int16) - fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]int16, xlen) - changed = true - } - - var mk float64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[float64]int32) - v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int32) - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]int32, xlen) - changed = true - } - - var mk float64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int64) - v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int64) - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int64, xlen) - changed = true - } - - var mk float64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]float32) - v, changed := fastpathTV.DecMapFloat64Float32V(*vp, 
fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]float32) - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]float32, xlen) - changed = true - } - - var mk float64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]float64) - v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]float64) - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]float64, xlen) - changed = true - } - - var mk float64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]bool) - v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if 
changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]bool) - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]bool, xlen) - changed = true - } - - var mk float64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]interface{}) - v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]interface{}) - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]string) - v, changed := fastpathTV.DecMapUintStringV(*vp, 
fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]string) - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]string, xlen) - changed = true - } - - var mk uint - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint) - v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint) - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint, xlen) - changed = true - } - - var mk uint - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint8) - v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint8) - 
fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]uint8, xlen) - changed = true - } - - var mk uint - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint16) - v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint16) - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]uint16, xlen) - changed = true - } - - var mk uint - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint32) - v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint32) - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) 
DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]uint32, xlen) - changed = true - } - - var mk uint - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint64) - v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint64) - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint64, xlen) - changed = true - } - - var mk uint - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uintptr) - v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uintptr) - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUintUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uintptr, xlen) - changed = true - } - - var mk uint - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int) - v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int) - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int, xlen) - changed = true - } - - var mk uint - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int8) - v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int8) - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt8V(v 
map[uint]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]int8, xlen) - changed = true - } - - var mk uint - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int16) - v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int16) - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]int16, xlen) - changed = true - } - - var mk uint - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int32) - v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int32) - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil 
&& dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]int32, xlen) - changed = true - } - - var mk uint - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int64) - v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int64) - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int64, xlen) - changed = true - } - - var mk uint - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float32) - v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]float32) - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]float32, xlen) - changed = true - } - - var mk uint - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float64) - v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]float64) - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]float64, xlen) - changed = true - } - - var mk uint - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]bool) - v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]bool) - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 
9) - v = make(map[uint]bool, xlen) - changed = true - } - - var mk uint - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]interface{}) - v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]interface{}) - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint8 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]string) - v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]string) - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]string, xlen) - changed = true - } - - var mk uint8 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint) - v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint) - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint, xlen) - changed = true - } - - var mk uint8 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint8) - v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint8) - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]uint8, xlen) - changed = true - } - - var mk uint8 - var mv uint8 - if 
containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint16) - v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint16) - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]uint16, xlen) - changed = true - } - - var mk uint8 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint32) - v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint32) - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]uint32, xlen) - changed = true - } - - var mk uint8 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint64) - v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint64) - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint64, xlen) - changed = true - } - - var mk uint8 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uintptr) - v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uintptr) - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uintptr, xlen) - changed = true - } - - var mk uint8 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int) - v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int) - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int, xlen) - changed = true - } - - var mk uint8 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int8) - v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int8) - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]int8, xlen) - changed = true - } - - var mk uint8 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - 
for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int16) - v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int16) - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]int16, xlen) - changed = true - } - - var mk uint8 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int32) - v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int32) - fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]int32, xlen) - changed = true - } - - var mk uint8 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int64) - v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int64) - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int64, xlen) - changed = true - } - - var mk uint8 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float32) - v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]float32) - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]float32, xlen) - changed = true - } - - var mk uint8 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } 
- } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float64) - v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]float64) - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]float64, xlen) - changed = true - } - - var mk uint8 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]bool) - v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]bool) - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]bool, xlen) - changed = true - } - - var mk uint8 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16IntfR(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]interface{}) - v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]interface{}) - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint16 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]string) - v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]string) - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]string, xlen) - changed = true - } - - var mk uint16 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, 
changed -} - -func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint) - v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint) - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint, xlen) - changed = true - } - - var mk uint16 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint8) - v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint8) - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]uint8, xlen) - changed = true - } - - var mk uint16 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { - if rv.CanAddr() 
{ - vp := rv.Addr().Interface().(*map[uint16]uint16) - v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint16) - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]uint16, xlen) - changed = true - } - - var mk uint16 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint32) - v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint32) - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]uint32, xlen) - changed = true - } - - var mk uint16 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint64) - v, changed := 
fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint64) - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint64, xlen) - changed = true - } - - var mk uint16 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uintptr) - v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uintptr) - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uintptr, xlen) - changed = true - } - - var mk uint16 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int) - v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, 
true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int) - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int, xlen) - changed = true - } - - var mk uint16 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int8) - v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int8) - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]int8, xlen) - changed = true - } - - var mk uint16 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int16) - v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int16) - fastpathTV.DecMapUint16Int16V(v, 
fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]int16, xlen) - changed = true - } - - var mk uint16 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int32) - v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int32) - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]int32, xlen) - changed = true - } - - var mk uint16 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int64) - v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int64) - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, 
checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int64, xlen) - changed = true - } - - var mk uint16 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float32) - v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]float32) - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]float32, xlen) - changed = true - } - - var mk uint16 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float64) - v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]float64) - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUint16Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]float64, xlen) - changed = true - } - - var mk uint16 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]bool) - v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]bool) - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]bool, xlen) - changed = true - } - - var mk uint16 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]interface{}) - v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]interface{}) - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]string) - v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]string) - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]string, xlen) - changed = true - } - - var mk uint32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint) - v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint) - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d) - if changed 
{ - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint, xlen) - changed = true - } - - var mk uint32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint8) - v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint8) - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]uint8, xlen) - changed = true - } - - var mk uint32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint16) - v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint16) - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, 
checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]uint16, xlen) - changed = true - } - - var mk uint32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint32) - v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint32) - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]uint32, xlen) - changed = true - } - - var mk uint32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint64) - v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint64) - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint64, 
changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint64, xlen) - changed = true - } - - var mk uint32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uintptr) - v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uintptr) - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uintptr, xlen) - changed = true - } - - var mk uint32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int) - v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int) - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if 
v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int, xlen) - changed = true - } - - var mk uint32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int8) - v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int8) - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]int8, xlen) - changed = true - } - - var mk uint32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int16) - v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int16) - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]int16, xlen) - changed = true - } - - var mk uint32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int32) - v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int32) - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]int32, xlen) - changed = true - } - - var mk uint32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int64) - v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int64) - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int64, xlen) - 
changed = true - } - - var mk uint32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float32) - v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]float32) - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]float32, xlen) - changed = true - } - - var mk uint32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float64) - v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]float64) - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]float64, xlen) - changed = true - } - - var mk uint32 - var mv float64 - 
if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]bool) - v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]bool) - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]bool, xlen) - changed = true - } - - var mk uint32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]interface{}) - v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]interface{}) - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < 
containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]string) - v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]string) - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]string, xlen) - changed = true - } - - var mk uint64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint) - v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint) - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint, xlen) - changed = true - } - - var mk uint64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint8) - v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint8) - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]uint8, xlen) - changed = true - } - - var mk uint64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint16) - v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint16) - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]uint16, xlen) - changed = true - } - - var mk uint64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint32) - v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint32) - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]uint32, xlen) - changed = true - } - - var mk uint64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint64) - v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint64) - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint64, xlen) - changed = true - } - - var mk uint64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } 
- } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uintptr) - v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uintptr) - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uintptr, xlen) - changed = true - } - - var mk uint64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int) - v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int) - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int, xlen) - changed = true - } - - var mk uint64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int8) - v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int8) - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]int8, xlen) - changed = true - } - - var mk uint64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int16) - v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int16) - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]int16, xlen) - changed = true - } - - var mk uint64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int32) - v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int32) - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]int32, xlen) - changed = true - } - - var mk uint64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int64) - v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int64) - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int64, xlen) - changed = true - } - - var mk uint64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} 
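For context, every deleted function in this hunk follows the same code-generated pattern: optionally treat nil as a cleared map, read the map header, allocate the map with a size hint from decInferLen, then decode key/value pairs either for a known container length or, when the length is negative, until a break marker is seen. The sketch below is a minimal, hypothetical distillation of that shape, not part of the codec package: the mapReader interface, decMapUint32Int64 helper, and fakeReader type are illustrative stand-ins, and the container-state callbacks (cr.sendContainerState) are omitted for brevity.

package main

import "fmt"

// mapReader is a hypothetical, minimal stand-in for the codec's internal
// decode driver; it exposes just enough to show the shape of the generated
// fast-path map decoders being deleted above.
type mapReader interface {
	TryDecodeAsNil() bool
	ReadMapStart() int // >= 0 for a sized map, < 0 for an indefinite ("break"-terminated) one
	CheckBreak() bool
	DecodeUint(bitsize uint8) uint64
	DecodeInt(bitsize uint8) int64
}

// decMapUint32Int64 mirrors the structure of DecMapUint32Int64V above:
// allocate the map on demand, then fill it from either a sized or a
// break-terminated container.
func decMapUint32Int64(dd mapReader, v map[uint32]int64, canChange bool) (map[uint32]int64, bool) {
	if dd.TryDecodeAsNil() {
		return nil, v != nil
	}
	changed := false
	containerLen := dd.ReadMapStart()
	if canChange && v == nil {
		v = make(map[uint32]int64, 4) // the generated code sizes this via decInferLen
		changed = true
	}
	readPair := func() {
		mk := uint32(dd.DecodeUint(32)) // key
		mv := dd.DecodeInt(64)          // value
		if v != nil {
			v[mk] = mv
		}
	}
	if containerLen > 0 {
		for j := 0; j < containerLen; j++ {
			readPair()
		}
	} else if containerLen < 0 {
		for !dd.CheckBreak() {
			readPair()
		}
	}
	return v, changed
}

// fakeReader feeds a fixed sequence of key/value pairs through the interface
// so the sketch can be run end to end.
type fakeReader struct {
	pairs [][2]int64
	i     int
}

func (r *fakeReader) TryDecodeAsNil() bool    { return false }
func (r *fakeReader) ReadMapStart() int       { return len(r.pairs) }
func (r *fakeReader) CheckBreak() bool        { return r.i >= len(r.pairs) }
func (r *fakeReader) DecodeUint(uint8) uint64 { return uint64(r.pairs[r.i][0]) }
func (r *fakeReader) DecodeInt(uint8) int64   { v := r.pairs[r.i][1]; r.i++; return v }

func main() {
	rd := &fakeReader{pairs: [][2]int64{{1, 10}, {2, 20}}}
	m, changed := decMapUint32Int64(rd, nil, true)
	fmt.Println(m, changed) // map[1:10 2:20] true
}

The generated variants removed here differ only in the key/value types, in the per-entry size hint passed to decInferLen, and in the decode call used for each element; the containerLen < 0 branch serves stream formats (e.g. CBOR indefinite-length maps) that terminate with a break marker rather than a declared length.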
- -func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float32) - v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]float32) - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]float32, xlen) - changed = true - } - - var mk uint64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float64) - v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]float64) - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]float64, xlen) - changed = true - } - - var mk uint64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]bool) - v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]bool) - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]bool, xlen) - changed = true - } - - var mk uint64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]interface{}) - v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]interface{}) - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uintptr - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapUintptrStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]string) - v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]string) - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]string, xlen) - changed = true - } - - var mk uintptr - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint) - v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint) - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint, xlen) - changed = true - } - - var mk uintptr - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint8) - v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint8) - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]uint8, xlen) - changed = true - } - - var mk uintptr - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint16) - v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint16) - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]uint16, xlen) - changed = true - } - - var mk uintptr - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint32) - v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint32) - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]uint32, xlen) - changed = true - } - - var mk uintptr - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint64) - v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint64) - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint64, xlen) - changed = true - } - - var mk uintptr - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp 
:= rv.Addr().Interface().(*map[uintptr]uintptr) - v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uintptr) - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uintptr, xlen) - changed = true - } - - var mk uintptr - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int) - v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int) - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int, xlen) - changed = true - } - - var mk uintptr - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[uintptr]int8) - v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int8) - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]int8, xlen) - changed = true - } - - var mk uintptr - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int16) - v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int16) - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]int16, xlen) - changed = true - } - - var mk uintptr - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int32) - v, changed 
:= fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int32) - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]int32, xlen) - changed = true - } - - var mk uintptr - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int64) - v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int64) - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int64, xlen) - changed = true - } - - var mk uintptr - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float32) - v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, 
fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]float32) - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]float32, xlen) - changed = true - } - - var mk uintptr - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float64) - v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]float64) - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]float64, xlen) - changed = true - } - - var mk uintptr - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]bool) - v, changed := 
fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]bool) - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]bool, xlen) - changed = true - } - - var mk uintptr - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]interface{}) - v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]interface{}) - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[int]string) - v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]string) - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]string, xlen) - changed = true - } - - var mk int - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint) - v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint) - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint, xlen) - changed = true - } - - var mk int - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint8) - v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { 
- v := rv.Interface().(map[int]uint8) - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]uint8, xlen) - changed = true - } - - var mk int - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint16) - v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint16) - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]uint16, xlen) - changed = true - } - - var mk int - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint32) - v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint32) - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) 
DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]uint32, xlen) - changed = true - } - - var mk int - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint64) - v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint64) - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint64, xlen) - changed = true - } - - var mk int - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uintptr) - v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uintptr) - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) - if changed { 
- *vp = v - } -} -func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uintptr, xlen) - changed = true - } - - var mk int - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int) - v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int) - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int, xlen) - changed = true - } - - var mk int - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int8) - v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int8) - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int8, changed bool) { - dd := 
d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]int8, xlen) - changed = true - } - - var mk int - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int16) - v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int16) - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]int16, xlen) - changed = true - } - - var mk int - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int32) - v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int32) - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange 
&& v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]int32, xlen) - changed = true - } - - var mk int - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int64) - v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int64) - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int64, xlen) - changed = true - } - - var mk int - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float32) - v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]float32) - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]float32, xlen) - changed = true - } - - var mk int - var mv 
float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float64) - v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]float64) - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]float64, xlen) - changed = true - } - - var mk int - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]bool) - v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]bool) - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]bool, xlen) - changed = true - } - - var mk int - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]interface{}) - v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]interface{}) - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int8 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]string) - v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]string) - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]string, xlen) - changed = true - } - - var mk int8 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint) - v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint) - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint, xlen) - changed = true - } - - var mk int8 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint8) - v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint8) - fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]uint8, xlen) - changed = true - } - - var mk int8 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if 
containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint16) - v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint16) - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]uint16, xlen) - changed = true - } - - var mk int8 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint32) - v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint32) - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]uint32, xlen) - changed = true - } - - var mk int8 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint64) - v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint64) - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint64, xlen) - changed = true - } - - var mk int8 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uintptr) - v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uintptr) - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uintptr, xlen) - changed = true - } - - var mk int8 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if 
v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int) - v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int) - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int, xlen) - changed = true - } - - var mk int8 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int8) - v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int8) - fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]int8, xlen) - changed = true - } - - var mk int8 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int16) - v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int16) - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]int16, xlen) - changed = true - } - - var mk int8 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int32) - v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int32) - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]int32, xlen) - changed = true - } - - var mk int8 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int64) - v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } 
- } else { - v := rv.Interface().(map[int8]int64) - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int64, xlen) - changed = true - } - - var mk int8 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float32) - v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]float32) - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]float32, xlen) - changed = true - } - - var mk int8 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float64) - v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]float64) - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f 
fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]float64, xlen) - changed = true - } - - var mk int8 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]bool) - v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]bool) - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]bool, xlen) - changed = true - } - - var mk int8 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]interface{}) - v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]interface{}) - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} 
-func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int16 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]string) - v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]string) - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]string, xlen) - changed = true - } - - var mk int16 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint) - v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint) - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) - if changed { - *vp = v - 
} -} -func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint, xlen) - changed = true - } - - var mk int16 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint8) - v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint8) - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]uint8, xlen) - changed = true - } - - var mk int16 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint16) - v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint16) - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, - d *Decoder) (_ 
map[int16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]uint16, xlen) - changed = true - } - - var mk int16 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint32) - v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint32) - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]uint32, xlen) - changed = true - } - - var mk int16 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint64) - v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint64) - fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v 
!= nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint64, xlen) - changed = true - } - - var mk int16 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uintptr) - v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uintptr) - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uintptr, xlen) - changed = true - } - - var mk int16 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int) - v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int) - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil 
{ - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int, xlen) - changed = true - } - - var mk int16 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int8) - v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int8) - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]int8, xlen) - changed = true - } - - var mk int16 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int16) - v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int16) - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]int16, xlen) - changed = true - } - - var mk int16 - var mv int16 - 
if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int32) - v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int32) - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]int32, xlen) - changed = true - } - - var mk int16 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int64) - v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int64) - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int64, xlen) - changed = true - } - - var mk int16 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float32) - v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]float32) - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]float32, xlen) - changed = true - } - - var mk int16 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float64) - v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]float64) - fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]float64, xlen) - changed = true - } - - var mk int16 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]bool) - v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]bool) - fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]bool, xlen) - changed = true - } - - var mk int16 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]interface{}) - v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]interface{}) - fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - 
v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]string) - v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]string) - fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]string, xlen) - changed = true - } - - var mk int32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint) - v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint) - fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint, xlen) - changed = true - } - - var mk int32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil 
{ - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint8) - v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint8) - fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]uint8, xlen) - changed = true - } - - var mk int32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint16) - v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint16) - fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]uint16, xlen) - changed = true - } - - var mk int32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint32) - v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint32) - fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]uint32, xlen) - changed = true - } - - var mk int32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint64) - v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint64) - fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint64, xlen) - changed = true - } - - var mk int32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - 
if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uintptr) - v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uintptr) - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uintptr, xlen) - changed = true - } - - var mk int32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int) - v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int) - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int, xlen) - changed = true - } - - var mk int32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f 
*decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int8) - v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int8) - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]int8, xlen) - changed = true - } - - var mk int32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int16) - v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int16) - fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]int16, xlen) - changed = true - } - - var mk int32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int32) - v, changed := 
fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int32) - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]int32, xlen) - changed = true - } - - var mk int32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int64) - v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int64) - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int64, xlen) - changed = true - } - - var mk int32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float32) - v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := 
rv.Interface().(map[int32]float32) - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]float32, xlen) - changed = true - } - - var mk int32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float64) - v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]float64) - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]float64, xlen) - changed = true - } - - var mk int32 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]bool) - v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]bool) - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, 
false, f.d) - } -} -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]bool, xlen) - changed = true - } - - var mk int32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]interface{}) - v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]interface{}) - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]string) - v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]string) - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, 
f.d) - } -} -func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]string, xlen) - changed = true - } - - var mk int64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint) - v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint) - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint, xlen) - changed = true - } - - var mk int64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint8) - v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint8) - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) - if 
changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]uint8, xlen) - changed = true - } - - var mk int64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint16) - v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint16) - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]uint16, xlen) - changed = true - } - - var mk int64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint32) - v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint32) - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ 
map[int64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]uint32, xlen) - changed = true - } - - var mk int64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint64) - v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint64) - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint64, xlen) - changed = true - } - - var mk int64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uintptr) - v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uintptr) - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } 
- return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uintptr, xlen) - changed = true - } - - var mk int64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int) - v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int) - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int, xlen) - changed = true - } - - var mk int64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int8) - v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int8) - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = 
make(map[int64]int8, xlen) - changed = true - } - - var mk int64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int16) - v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int16) - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]int16, xlen) - changed = true - } - - var mk int64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int32) - v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int32) - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]int32, xlen) - changed = true - } - - var mk int64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int64) - v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int64) - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int64, xlen) - changed = true - } - - var mk int64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float32) - v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]float32) - fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]float32, xlen) - changed = true - } - - var mk int64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float64) - v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]float64) - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]float64, xlen) - changed = true - } - - var mk int64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]bool) - v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]bool) - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]bool, xlen) - changed = true - } - - var mk int64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]interface{}) - v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]interface{}) - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk bool - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]string) - v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]string) - fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]string, xlen) - changed = true - } - - var mk bool - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint) - v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint) - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint, xlen) - changed = true - } - - var mk bool - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint8) - v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint8) - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]uint8, xlen) - changed = true - } - - var mk bool - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - 
} - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint16) - v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint16) - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]uint16, xlen) - changed = true - } - - var mk bool - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint32) - v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint32) - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]uint32, xlen) - changed = true - } - - var mk bool - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint64) - v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint64) - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint64, xlen) - changed = true - } - - var mk bool - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uintptr) - v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uintptr) - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uintptr, xlen) - changed = true - } - - var mk bool - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int) - v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) - if 
changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int) - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int, xlen) - changed = true - } - - var mk bool - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int8) - v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int8) - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]int8, xlen) - changed = true - } - - var mk bool - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int16) - v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int16) - fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d 
*Decoder) { - v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]int16, xlen) - changed = true - } - - var mk bool - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int32) - v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int32) - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]int32, xlen) - changed = true - } - - var mk bool - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int64) - v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int64) - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, - d 
*Decoder) (_ map[bool]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int64, xlen) - changed = true - } - - var mk bool - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float32) - v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]float32) - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]float32, xlen) - changed = true - } - - var mk bool - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float64) - v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]float64) - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - 
return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]float64, xlen) - changed = true - } - - var mk bool - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]bool) - v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]bool) - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]bool, xlen) - changed = true - } - - var mk bool - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl deleted file mode 100644 index c3ffdf93d..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ /dev/null @@ -1,527 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. 
-// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. -// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, {{ .FastpathLen }} // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < {{ .FastpathLen }} && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} 
- default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { cr.sendContainerState(containerArrayElem) } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} -} - -func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerMapEnd) } -} - -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - {{end}}if e.h.Canonical { - {{if eq .MapKey "interface{}"}}{{/* out of band - */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - e.asis(v2[j].v) - if cr != nil { cr.sendContainerState(containerMapValue) } - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i int - for k, _ := range v { - v2[i] = {{ $x }}(k) - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ encmd .MapKey "k2"}}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ encmd .Elem "v2"}} - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} -} - -{{end}}{{end}}{{end}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. 
contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} - vp := rv.Addr().Interface().(*[]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { - dd := d.d - {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { {{/* - // fast-path is for "basic" immutable types, so no need to copy them over - // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) - // copy(s, v[:cap(v)]) - // v = s */}} - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]{{ .Elem }}, xlen) - } - } else { - v = make([]{{ .Elem }}, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } {{/* // all checks done. cannot go past len. */}} - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - if xtrunc { {{/* // means canChange=true, changed=true already. */}} - for ; j < containerLenS; j++ { - v = append(v, {{ zerocmd .Elem }}) - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} - if breakFound { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]{{ .Elem }}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} - {{ if eq .Elem "interface{}" }}d.decode(&v[j]) - {{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -{{end}}{{end}}{{end}} - - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, - d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd := d.d - cr := d.cr - {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) - v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) - changed = true - } - {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} - var mk {{ .MapKey }} - var mv {{ .Elem }} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) } - return v, changed -} - -{{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go deleted file mode 100644 index 63e591145..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build notfastpath - -package codec - -import "reflect" - -const fastpathEnabled = false - -// The generated fast-path code is very large, and adds a few seconds to the build time. -// This causes test execution, execution of small tools which use codec, etc -// to take a long time. -// -// To mitigate, we now support the notfastpath tag. -// This tag disables fastpath during build, allowing for faster build, test execution, -// short-program runs, etc. - -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } - -type fastpathT struct{} -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} -type fastpathA [0]fastpathE - -func (x fastpathA) index(rtid uintptr) int { return -1 } - -var fastpathAV fastpathA -var fastpathTV fastpathT diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl deleted file mode 100644 index 32df54144..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,104 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var 
"l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl deleted file mode 100644 index 77400e0a1..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,58 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" 
}}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go deleted file mode 100644 index eb0bdad35..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ /dev/null @@ -1,243 +0,0 @@ -/* // +build ignore */ - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e: e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. 
IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d: d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.raw(iv) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { - return f.d.raw() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl deleted file mode 100644 index ad99f6671..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,372 +0,0 @@ -/* // +build ignore */ - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e:e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d:d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.raw(iv) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { - return f.d.raw() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} - -{{/* - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncDriver() encDriver { - return f.e.e -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDriver() decDriver { - return f.d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncNil() { - f.e.e.EncodeNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBytes(v []byte) { - f.e.e.EncodeStringBytes(c_RAW, v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayStart(length int) { - f.e.e.EncodeArrayStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEnd() { - f.e.e.EncodeArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEntrySeparator() { - f.e.e.EncodeArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapStart(length int) { - f.e.e.EncodeMapStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEnd() { - f.e.e.EncodeMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEntrySeparator() { - f.e.e.EncodeMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapKVSeparator() { - f.e.e.EncodeMapKVSeparator() -} - -// --------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBytes(v *[]byte) { - *v = f.d.d.DecodeBytes(*v) -} -// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTryNil() bool { - return f.d.d.TryDecodeAsNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsNil() (b bool) { - return f.d.d.IsContainerType(valueTypeNil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsMap() (b bool) { - return f.d.d.IsContainerType(valueTypeMap) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsArray() (b bool) { - return f.d.d.IsContainerType(valueTypeArray) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecCheckBreak() bool { - return f.d.d.CheckBreak() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapStart() int { - return f.d.d.ReadMapStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayStart() int { - return f.d.d.ReadArrayStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEnd() { - f.d.d.ReadMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEnd() { - f.d.d.ReadArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEntrySeparator() { - f.d.d.ReadArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEntrySeparator() { - f.d.d.ReadMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapKVSeparator() { - f.d.d.ReadMapKVSeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { - return f.d.d.DecodeStringAsBytes(bs) -} - - -// -- encode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { - ee := f.e.e - {{ encmd .Primitive "v" }} -} -{{ end }}{{ end }}{{ end }} - -// -- decode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { - dd := f.d.d - *vp = {{ decmd .Primitive }} -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { - dd := f.d.d - v = {{ decmd .Primitive }} - return -} -{{ end }}{{ end }}{{ end }} - - -// -- encode calls (slices/maps) -{{range .Values}}{{if not .Primitive }}{{if .Slice }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) -} -{{ end }}{{ end }} - -// -- decode calls (slices/maps) -{{range .Values}}{{if not .Primitive }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { -{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) - if changed { - *vp = v - } -} -{{ end }}{{ end }} -*/}} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go deleted file mode 100644 index 2ace97b78..000000000 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl - -const genDecMapTmpl = ` -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) -` - -const genDecListTmpl = ` -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf 
"%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} -` - diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go deleted file mode 100644 index c4944dbff..000000000 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ /dev/null @@ -1,2014 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "go/format" - "io" - "io/ioutil" - "math/rand" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" - "unicode" - "unicode/utf8" -) - -// --------------------------------------------------- -// codecgen supports the full cycle of reflection-based codec: -// - RawExt -// - Raw -// - Builtins -// - Extensions -// - (Binary|Text|JSON)(Unm|M)arshal -// - generic by-kind -// -// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. -// In those areas, we try to only do reflection or interface-conversion when NECESSARY: -// - Extensions, only if Extensions are configured. -// -// However, codecgen doesn't support the following: -// - Canonical option. (codecgen IGNORES it currently) -// This is just because it has not been implemented. -// -// During encode/decode, Selfer takes precedence. -// A type implementing Selfer will know how to encode/decode itself statically. -// -// The following field types are supported: -// array: [n]T -// slice: []T -// map: map[K]V -// primitive: [u]int[n], float(32|64), bool, string -// struct -// -// --------------------------------------------------- -// Note that a Selfer cannot call (e|d).(En|De)code on itself, -// as this will cause a circular reference, as (En|De)code will call Selfer methods. -// Any type that implements Selfer must implement completely and not fallback to (En|De)code. -// -// In addition, code in this file manages the generation of fast-path implementations of -// encode/decode of slices/maps of primitive keys/values. -// -// Users MUST re-generate their implementations whenever the code shape changes. -// The generated code will panic if it was generated with a version older than the supporting library. -// --------------------------------------------------- -// -// codec framework is very feature rich. 
-// When encoding or decoding into an interface, it depends on the runtime type of the interface. -// The type of the interface may be a named type, an extension, etc. -// Consequently, we fallback to runtime codec for encoding/decoding interfaces. -// In addition, we fallback for any value which cannot be guaranteed at runtime. -// This allows us support ANY value, including any named types, specifically those which -// do not implement our interfaces (e.g. Selfer). -// -// This explains some slowness compared to other code generation codecs (e.g. msgp). -// This reduction in speed is only seen when your refers to interfaces, -// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } -// -// codecgen will panic if the file was generated with an old version of the library in use. -// -// Note: -// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. -// This way, there isn't a function call overhead just to see that we should not enter a block of code. - -// GenVersion is the current version of codecgen. -// -// NOTE: Increment this value each time codecgen changes fundamentally. -// Fundamental changes are: -// - helper methods change (signature change, new ones added, some removed, etc) -// - codecgen command line changes -// -// v1: Initial Version -// v2: -// v3: Changes for Kubernetes: -// changes in signature of some unpublished helper methods and codecgen cmdline arguments. -// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) -// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. -const GenVersion = 5 - -const ( - genCodecPkg = "codec1978" - genTempVarPfx = "yy" - genTopLevelVarName = "x" - - // ignore canBeNil parameter, and always set to true. - // This is because nil can appear anywhere, so we should always check. - genAnythingCanBeNil = true - - // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; - // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals - // are not executed a lot. - // - // From testing, it didn't make much difference in runtime, so keep as true (one function only) - genUseOneFunctionForDecStructMap = true -) - -type genStructMapStyle uint8 - -const ( - genStructMapStyleConsolidated genStructMapStyle = iota - genStructMapStyleLenPrefix - genStructMapStyleCheckBreak -) - -var ( - genAllTypesSamePkgErr = errors.New("All types must be in the same package") - genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") - genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") - genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) - genCheckVendor bool -) - -// genRunner holds some state used during a Gen run. 
-type genRunner struct { - w io.Writer // output - c uint64 // counter used for generating varsfx - t []reflect.Type // list of types to run selfer on - - tc reflect.Type // currently running selfer on this type - te map[uintptr]bool // types for which the encoder has been created - td map[uintptr]bool // types for which the decoder has been created - cp string // codec import path - - im map[string]reflect.Type // imports to add - imn map[string]string // package names of imports to add - imc uint64 // counter for import numbers - - is map[reflect.Type]struct{} // types seen during import search - bp string // base PkgPath, for which we are generating for - - cpfx string // codec package prefix - unsafe bool // is unsafe to be used in generated code? - - tm map[reflect.Type]struct{} // types for which enc/dec must be generated - ts []reflect.Type // types for which enc/dec must be generated - - xs string // top level variable/constant suffix - hn string // fn helper type name - - ti *TypeInfos - // rr *rand.Rand // random generator for file-specific types -} - -// Gen will write a complete go file containing Selfer implementations for each -// type passed. All the types must be in the same package. -// -// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* -func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { - // All types passed to this method do not have a codec.Selfer method implemented directly. - // codecgen already checks the AST and skips any types that define the codec.Selfer methods. - // Consequently, there's no need to check and trim them if they implement codec.Selfer - - if len(typ) == 0 { - return - } - x := genRunner{ - unsafe: useUnsafe, - w: w, - t: typ, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(typ[0]), - xs: uid, - ti: ti, - } - if x.ti == nil { - x.ti = defTypeInfos - } - if x.xs == "" { - rr := rand.New(rand.NewSource(time.Now().UnixNano())) - x.xs = strconv.FormatInt(rr.Int63n(9999), 10) - } - - // gather imports first: - x.cp = genImportPath(reflect.TypeOf(x)) - x.imn[x.cp] = genCodecPkg - for _, t := range typ { - // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - if genImportPath(t) != x.bp { - panic(genAllTypesSamePkgErr) - } - x.genRefPkgs(t) - } - if buildTags != "" { - x.line("// +build " + buildTags) - x.line("") - } - x.line(` - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -`) - x.line("package " + pkgName) - x.line("") - x.line("import (") - if x.cp != x.bp { - x.cpfx = genCodecPkg + "." 
- x.linef("%s \"%s\"", genCodecPkg, x.cp) - } - // use a sorted set of im keys, so that we can get consistent output - imKeys := make([]string, 0, len(x.im)) - for k, _ := range x.im { - imKeys = append(imKeys, k) - } - sort.Strings(imKeys) - for _, k := range imKeys { // for k, _ := range x.im { - x.linef("%s \"%s\"", x.imn[k], k) - } - // add required packages - for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { - if _, ok := x.im[k]; !ok { - if k == "unsafe" && !x.unsafe { - continue - } - x.line("\"" + k + "\"") - } - } - x.line(")") - x.line("") - - x.line("const (") - x.linef("// ----- content types ----") - x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) - x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) - x.linef("// ----- value types used ----") - x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) - x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) - x.linef("// ----- containerStateValues ----") - x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) - x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) - x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) - x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) - x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) - x.line(")") - x.line("var (") - x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") - x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") - x.line(")") - x.line("") - - if x.unsafe { - x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") - x.line("") - } - x.hn = "codecSelfer" + x.xs - x.line("type " + x.hn + " struct{}") - x.line("") - - x.varsfxreset() - x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) - x.line("_, file, _, _ := runtime.Caller(0)") - x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) - x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) - x.line("panic(err)") - x.linef("}") - x.line("if false { // reference the types, but skip this branch at build/run time") - var n int - // for k, t := range x.im { - for _, k := range imKeys { - t := x.im[k] - x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) - n++ - } - if x.unsafe { - x.linef("var v%v unsafe.Pointer", n) - n++ - } - if n > 0 { - x.out("_") - for i := 1; i < n; i++ { - x.out(", _") - } - x.out(" = v0") - for i := 1; i < n; i++ { - x.outf(", v%v", i) - } - } - x.line("} ") // close if false - x.line("}") // close init - x.line("") - - // generate rest of type info - for _, t := range typ { - x.tc = t - x.selfer(true) - x.selfer(false) - } - - for _, t := range x.ts { - rtid := reflect.ValueOf(t).Pointer() - // generate enc functions for all these slice/map types. - x.varsfxreset() - x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(true) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.encListFallback("v", t) - case reflect.Map: - x.encMapFallback("v", t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - - // generate dec functions for all these slice/map types. 
- x.varsfxreset() - x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(false) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.decListFallback("v", rtid, t) - case reflect.Map: - x.decMapFallback("v", rtid, t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - } - - x.line("") -} - -func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { - // return varname != genTopLevelVarName && t != x.tc - // the only time we checkForSelfer is if we are not at the TOP of the generated code. - return varname != genTopLevelVarName -} - -func (x *genRunner) arr2str(t reflect.Type, s string) string { - if t.Kind() == reflect.Array { - return s - } - return "" -} - -func (x *genRunner) genRequiredMethodVars(encode bool) { - x.line("var h " + x.hn) - if encode { - x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") - } else { - x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") - } - x.line("_, _, _ = h, z, r") -} - -func (x *genRunner) genRefPkgs(t reflect.Type) { - if _, ok := x.is[t]; ok { - return - } - // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - x.is[t] = struct{}{} - tpkg, tname := genImportPath(t), t.Name() - if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { - if _, ok := x.im[tpkg]; !ok { - x.im[tpkg] = t - if idx := strings.LastIndex(tpkg, "/"); idx < 0 { - x.imn[tpkg] = tpkg - } else { - x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) - } - } - } - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: - x.genRefPkgs(t.Elem()) - case reflect.Map: - x.genRefPkgs(t.Elem()) - x.genRefPkgs(t.Key()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { - x.genRefPkgs(t.Field(i).Type) - } - } - } -} - -func (x *genRunner) line(s string) { - x.out(s) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) varsfx() string { - x.c++ - return strconv.FormatUint(x.c, 10) -} - -func (x *genRunner) varsfxreset() { - x.c = 0 -} - -func (x *genRunner) out(s string) { - if _, err := io.WriteString(x.w, s); err != nil { - panic(err) - } -} - -func (x *genRunner) linef(s string, params ...interface{}) { - x.line(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) outf(s string, params ...interface{}) { - x.out(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) genTypeName(t reflect.Type) (n string) { - // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() - - // if the type has a PkgPath, which doesn't match the current package, - // then include it. 
- // We cannot depend on t.String() because it includes current package, - // or t.PkgPath because it includes full import path, - // - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "*" - t = t.Elem() - } - if tn := t.Name(); tn != "" { - return ptrPfx + x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) - case reflect.Slice: - return ptrPfx + "[]" + x.genTypeName(t.Elem()) - case reflect.Array: - return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) - case reflect.Chan: - return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) - default: - if t == intfTyp { - return ptrPfx + "interface{}" - } else { - return ptrPfx + x.genTypeNamePrim(t) - } - } -} - -func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { - if t.Name() == "" { - return t.String() - } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { - return t.Name() - } else { - return x.imn[genImportPath(t)] + "." + t.Name() - // return t.String() // best way to get the package name inclusive - } -} - -func (x *genRunner) genZeroValueR(t reflect.Type) string { - // if t is a named type, w - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, - reflect.Slice, reflect.Map, reflect.Invalid: - return "nil" - case reflect.Bool: - return "false" - case reflect.String: - return `""` - case reflect.Struct, reflect.Array: - return x.genTypeName(t) + "{}" - default: // all numbers - return "0" - } -} - -func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { - return genMethodNameT(t, x.tc) -} - -func (x *genRunner) selfer(encode bool) { - t := x.tc - t0 := t - // always make decode use a pointer receiver, - // and structs always use a ptr receiver (encode|decode) - isptr := !encode || t.Kind() == reflect.Struct - x.varsfxreset() - fnSigPfx := "func (x " - if isptr { - fnSigPfx += "*" - } - fnSigPfx += x.genTypeName(t) - - x.out(fnSigPfx) - if isptr { - t = reflect.PtrTo(t) - } - if encode { - x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") - x.genRequiredMethodVars(true) - // x.enc(genTopLevelVarName, t) - x.encVar(genTopLevelVarName, t) - } else { - x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - // do not use decVar, as there is no need to check TryDecodeAsNil - // or way to elegantly handle that, and also setting it to a - // non-nil value doesn't affect the pointer passed. 
- // x.decVar(genTopLevelVarName, t, false) - x.dec(genTopLevelVarName, t0) - } - x.line("}") - x.line("") - - if encode || t0.Kind() != reflect.Struct { - return - } - - // write is containerMap - if genUseOneFunctionForDecStructMap { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated) - x.line("}") - x.line("") - } else { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix) - x.line("}") - x.line("") - - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak) - x.line("}") - x.line("") - } - - // write containerArray - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0) - x.line("}") - x.line("") - -} - -// used for chan, array, slice, map -func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { - if encode { - x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) - } else { - x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) - } - x.registerXtraT(t) -} - -func (x *genRunner) registerXtraT(t reflect.Type) { - // recursively register the types - if _, ok := x.tm[t]; ok { - return - } - var tkey reflect.Type - switch t.Kind() { - case reflect.Chan, reflect.Slice, reflect.Array: - case reflect.Map: - tkey = t.Key() - default: - return - } - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) - // check if this refers to any xtra types eg. a slice of array: add the array - x.registerXtraT(t.Elem()) - if tkey != nil { - x.registerXtraT(tkey) - } -} - -// encVar will encode a variable. 
-// The parameter, t, is the reflect.Type of the variable itself -func (x *genRunner) encVar(varname string, t reflect.Type) { - // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) - var checkNil bool - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: - checkNil = true - } - if checkNil { - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - } - switch t.Kind() { - case reflect.Ptr: - switch t.Elem().Kind() { - case reflect.Struct, reflect.Array: - x.enc(varname, genNonPtr(t)) - default: - i := x.varsfx() - x.line(genTempVarPfx + i + " := *" + varname) - x.enc(genTempVarPfx+i, genNonPtr(t)) - } - case reflect.Struct, reflect.Array: - i := x.varsfx() - x.line(genTempVarPfx + i + " := &" + varname) - x.enc(genTempVarPfx+i, t) - default: - x.enc(varname, t) - } - - if checkNil { - x.line("}") - } - -} - -// enc will encode a variable (varname) of type t, -// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) -func (x *genRunner) enc(varname string, t reflect.Type) { - rtid := reflect.ValueOf(t).Pointer() - // We call CodecEncodeSelf if one of the following are honored: - // - the type already implements Selfer, call that - // - the type has a Selfer implementation just created, use that - // - the type is in the list of the ones we will generate for, but it is not currently being generated - - mi := x.varsfx() - tptr := reflect.PtrTo(t) - tk := t.Kind() - if x.checkForSelfer(t, varname) { - if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T - if tptr.Implements(selferTyp) || t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } else { // varname is of type T - if t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } else if tptr.Implements(selferTyp) { - x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) - x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) - return - } - } - - if _, ok := x.te[rtid]; ok { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.te[rtid] = true - rtidAdded = true - } - - // check if - // - type is RawExt, Raw - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawTyp { - x.linef("} else { z.EncRaw(%v)", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.EncodeRawExt(%v, e)", varname) - return - } - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. 
- if genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) - } - if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T - if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } - } else { // varname is of type T - if t.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) - } - } - x.line("} else {") - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(int64(" + varname + "))") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(uint64(" + varname + "))") - case reflect.Float32: - x.line("r.EncodeFloat32(float32(" + varname + "))") - case reflect.Float64: - x.line("r.EncodeFloat64(float64(" + varname + "))") - case reflect.Bool: - x.line("r.EncodeBool(bool(" + varname + "))") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") - case reflect.Chan: - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - case reflect.Array: - x.xtraSM(varname, true, t) - case reflect.Slice: - // if nil, call dedicated function - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") - } else { - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - } - case reflect.Map: - // if nil, call dedicated function - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") - } else { - x.xtraSM(varname, true, t) - // x.encMapFallback(varname, rtid, t) - } - case reflect.Struct: - if !inlist { - delete(x.te, rtid) - x.line("z.EncFallback(" + varname + ")") - break - } - x.encStruct(varname, rtid, t) - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.EncFallback(" + varname + ")") - } -} - -func (x *genRunner) encZero(t reflect.Type) { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(0)") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(0)") - case reflect.Float32: - x.line("r.EncodeFloat32(0)") - case reflect.Float64: - x.line("r.EncodeFloat64(0)") - case reflect.Bool: - x.line("r.EncodeBool(false)") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) - default: - x.line("r.EncodeNil()") - } -} - -func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { - // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) - // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it - - // if t === type currently running selfer on, do for all - ti := x.ti.get(rtid, t) - i := x.varsfx() - sepVarname := genTempVarPfx + "sep" + i - numfieldsvar := genTempVarPfx + "q" + i - ti2arrayvar := genTempVarPfx + "r" + i - struct2arrvar := genTempVarPfx + "2arr" + i - - x.line(sepVarname + " := !z.EncBinary()") - x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - // due to omitEmpty, we need to calculate the - // number of non-empty things we write out first. - // This is required as we need to pre-determine the size of the container, - // to support length-prefixing. - x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) - x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar) - x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) - nn := 0 - for j, si := range tisfi { - if !si.omitEmpty { - nn++ - continue - } - var t2 reflect.StructField - var omitline string - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - omitline += varname3 + " != nil && " - } - } - } - // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. - // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) - switch t2.Type.Kind() { - case reflect.Struct: - omitline += " true" - case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: - omitline += "len(" + varname + "." + t2.Name + ") != 0" - default: - omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) - } - x.linef("%s[%v] = %s", numfieldsvar, j, omitline) - } - x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") - x.linef("} else {") // if not ti.toArray - x.linef("%snn%s = %v", genTempVarPfx, i, nn) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") - x.line("}") // close if not StructToArray - - for j, si := range tisfi { - i := x.varsfx() - isNilVarName := genTempVarPfx + "n" + i - var labelUsed bool - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - if !labelUsed { - x.line("var " + isNilVarName + " bool") - } - x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") - x.line("goto LABEL" + i) - x.line("}") - labelUsed = true - // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") - } - } - // t2 = t.FieldByIndex(si.is) - } - if labelUsed { - x.line("LABEL" + i + ":") - } - // if the type of the field is a Selfer, or one of the ones - - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - } - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.encVar(varname+"."+t2.Name, t2.Type) - if si.omitEmpty { - x.linef("} else {") - x.encZero(t2.Type) - x.linef("}") - } - if labelUsed { - x.line("}") - } - - x.linef("} else {") // if not ti.toArray - - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - x.encVar(varname+"."+t2.Name, t2.Type) - x.line("}") - } else { - x.encVar(varname+"."+t2.Name, t2.Type) - } - if si.omitEmpty { - x.line("}") - } - x.linef("} ") // end if/else ti.toArray - } - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - x.line("}") - -} - -func (x *genRunner) encListFallback(varname string, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) - return - } - i := x.varsfx() - g := genTempVarPfx - x.line("r.EncodeArrayStart(len(" + varname + "))") - if t.Kind() == reflect.Chan { - x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, 
g, i, g, i, g, i) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef("%sv%s := <-%s", g, i, varname) - } else { - // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - } - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) encMapFallback(varname string, t reflect.Type) { - // TODO: expand this to handle canonical. - i := x.varsfx() - x.line("r.EncodeMapStart(len(" + varname + "))") - x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - i := x.varsfx() - if !canBeNil { - canBeNil = genAnythingCanBeNil || !genIsImmutable(t) - } - if canBeNil { - x.line("if r.TryDecodeAsNil() {") - if t.Kind() == reflect.Ptr { - x.line("if " + varname + " != nil { ") - - // if varname is a field of a struct (has a dot in it), - // then just set it to nil - if strings.IndexByte(varname, '.') != -1 { - x.line(varname + " = nil") - } else { - x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) - } - x.line("}") - } else { - x.line(varname + " = " + x.genZeroValueR(t)) - } - x.line("} else {") - } else { - x.line("// cannot be nil") - } - if t.Kind() != reflect.Ptr { - if x.decTryAssignPrimitive(varname, t) { - x.line(genTempVarPfx + "v" + i + " := &" + varname) - x.dec(genTempVarPfx+"v"+i, t) - } - } else { - x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) - // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). - // There's a chance of a **T in here which is nil. - var ptrPfx string - for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { - ptrPfx += "*" - x.linef("if %s%s == nil { %s%s = new(%s)}", - ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) - } - // if varname has [ in it, then create temp variable for this ptr thingie - if strings.Index(varname, "[") >= 0 { - varname2 := genTempVarPfx + "w" + i - x.line(varname2 + " := " + varname) - varname = varname2 - } - - if ptrPfx == "" { - x.dec(varname, t) - } else { - x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) - x.dec(genTempVarPfx+"z"+i, t) - } - - } - - if canBeNil { - x.line("} ") - } -} - -// dec will decode a variable (varname) of type ptrTo(t). -// t is always a basetype (i.e. not of kind reflect.Ptr). 
-func (x *genRunner) dec(varname string, t reflect.Type) { - // assumptions: - // - the varname is to a pointer already. No need to take address of it - // - t is always a baseType T (not a *T, etc). - rtid := reflect.ValueOf(t).Pointer() - tptr := reflect.PtrTo(t) - if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || tptr.Implements(selferTyp) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - if _, ok := x.td[rtid]; ok { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.td[rtid] = true - rtidAdded = true - } - - // check if - // - type is Raw, RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() - x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawTyp { - x.linef("} else { *%v = z.DecRaw()", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) - return - } - - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. - if genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) - } - - if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { - x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) - } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { - x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) - } - - x.line("} else {") - - // Since these are pointers, we cannot share, and have to use them one by one - switch t.Kind() { - case reflect.Int: - x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecInt((*int)(" + varname + "))") - case reflect.Int8: - x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") - // x.line("z.DecInt8((*int8)(" + varname + "))") - case reflect.Int16: - x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") - // x.line("z.DecInt16((*int16)(" + varname + "))") - case reflect.Int32: - x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") - // x.line("z.DecInt32((*int32)(" + varname + "))") - case reflect.Int64: - x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") - // x.line("z.DecInt64((*int64)(" + varname + "))") - - case reflect.Uint: - x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecUint((*uint)(" + varname + "))") - case reflect.Uint8: - x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") - // 
x.line("z.DecUint8((*uint8)(" + varname + "))") - case reflect.Uint16: - x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") - //x.line("z.DecUint16((*uint16)(" + varname + "))") - case reflect.Uint32: - x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") - //x.line("z.DecUint32((*uint32)(" + varname + "))") - case reflect.Uint64: - x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") - //x.line("z.DecUint64((*uint64)(" + varname + "))") - case reflect.Uintptr: - x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - - case reflect.Float32: - x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") - //x.line("z.DecFloat32((*float32)(" + varname + "))") - case reflect.Float64: - x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") - // x.line("z.DecFloat64((*float64)(" + varname + "))") - - case reflect.Bool: - x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") - // x.line("z.DecBool((*bool)(" + varname + "))") - case reflect.String: - x.line("*((*string)(" + varname + ")) = r.DecodeString()") - // x.line("z.DecString((*string)(" + varname + "))") - case reflect.Array, reflect.Chan: - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, true, t) - case reflect.Slice: - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") - } else { - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, false, t) - } - case reflect.Map: - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") - } else { - x.xtraSM(varname, false, t) - // x.decMapFallback(varname, rtid, t) - } - case reflect.Struct: - if inlist { - x.decStruct(varname, rtid, t) - } else { - // delete(x.td, rtid) - x.line("z.DecFallback(" + varname + ", false)") - } - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.DecFallback(" + varname + ", true)") - } -} - -func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { - // This should only be used for exact primitives (ie un-named types). - // Named types may be implementations of Selfer, Unmarshaler, etc. - // They should be handled by dec(...) 
- - if t.Name() != "" { - tryAsPtr = true - return - } - - switch t.Kind() { - case reflect.Int: - x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) - case reflect.Int8: - x.linef("%s = r.DecodeInt(8)", varname) - case reflect.Int16: - x.linef("%s = r.DecodeInt(16)", varname) - case reflect.Int32: - x.linef("%s = r.DecodeInt(32)", varname) - case reflect.Int64: - x.linef("%s = r.DecodeInt(64)", varname) - - case reflect.Uint: - x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) - case reflect.Uint8: - x.linef("%s = r.DecodeUint(8)", varname) - case reflect.Uint16: - x.linef("%s = r.DecodeUint(16)", varname) - case reflect.Uint32: - x.linef("%s = r.DecodeUint(32)", varname) - case reflect.Uint64: - x.linef("%s = r.DecodeUint(64)", varname) - case reflect.Uintptr: - x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) - - case reflect.Float32: - x.linef("%s = r.DecodeFloat(true)", varname) - case reflect.Float64: - x.linef("%s = r.DecodeFloat(false)", varname) - - case reflect.Bool: - x.linef("%s = r.DecodeBool()", varname) - case reflect.String: - x.linef("%s = r.DecodeString()", varname) - default: - tryAsPtr = true - } - return -} - -func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) - return - } - type tstruc struct { - TempVar string - Rand string - Varname string - CTyp string - Typ string - Immutable bool - Size int - } - telem := t.Elem() - ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} - - funcs := make(template.FuncMap) - - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["zero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["isArray"] = func() bool { - return t.Kind() == reflect.Array - } - funcs["isSlice"] = func() bool { - return t.Kind() == reflect.Slice - } - funcs["isChan"] = func() bool { - return t.Kind() == reflect.Chan - } - tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - KTyp string - Typ string - Size int - } - telem := t.Elem() - tkey := t.Key() - ts := tstruc{ - genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), - x.genTypeName(telem), int(telem.Size() + tkey.Size()), - } - - funcs := make(template.FuncMap) - funcs["decElemZero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["decElemKindImmutable"] = func() bool { - return genIsImmutable(telem) - } - funcs["decElemKindPtr"] = func() bool { - return telem.Kind() == reflect.Ptr - } - funcs["decElemKindIntf"] = func() bool { - return telem.Kind() == reflect.Interface - } - funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, tkey, false) - return "" - } - 
funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLineK"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - - tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.line("switch (" + kName + ") {") - for _, si := range tisfi { - x.line("case \"" + si.encName + "\":") - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - x.decVar(varname+"."+t2.Name, t2.Type, false) - } - x.line("default:") - // pass the slice here, so that the string will not escape, and maybe save allocation - x.line("z.DecStructFieldNotFound(-1, " + kName + ")") - x.line("} // end switch " + kName) -} - -func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { - tpfx := genTempVarPfx - i := x.varsfx() - kName := tpfx + "s" + i - - // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out. - // However, using that was more expensive, as it seems that the switch expression - // is evaluated each time. - // - // We could depend on decodeString using a temporary/shared buffer internally. - // However, this model of creating a byte array, and using explicitly is faster, - // and allows optional use of unsafe []byte->string conversion without alloc. - - // Also, ensure that the slice array doesn't escape. - // That will help escape analysis prevent allocation when it gets better. - - // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") - // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") - // use the scratch buffer to avoid allocation (most field names are < 32). - - x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") - - x.line("_ = " + kName + "Slc") - switch style { - case genStructMapStyleLenPrefix: - x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) - case genStructMapStyleCheckBreak: - x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) - default: // 0, otherwise. 
- x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) - x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) - x.line("} else { if r.CheckBreak() { break }; }") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)") - // let string be scoped to this loop alone, so it doesn't escape. - if x.unsafe { - x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" + - kName + "Slc[0])), len(" + kName + "Slc)}") - x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))") - } else { - x.line(kName + " := string(" + kName + "Slc)") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.decStructMapSwitch(kName, varname, rtid, t) - - x.line("} // end for " + tpfx + "j" + i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - i := x.varsfx() - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.linef("var %sj%s int", tpfx, i) - x.linef("var %sb%s bool", tpfx, i) // break - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - for _, si := range tisfi { - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", - tpfx, i, x.xs, breakString) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.decVar(varname+"."+t2.Name, t2.Type, true) - } - // read remaining values and throw away. 
- x.line("for {") - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { break }", tpfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) - x.line("}") - x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { - // if container is map - i := x.varsfx() - x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) - x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - if genUseOneFunctionForDecStructMap { - x.line("} else { ") - x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) - } else { - x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") - x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") - x.line("} else {") - x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") - } - x.line("}") - - // else if container is array - x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else { ") - x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) - x.line("}") - // else panic - x.line("} else { ") - x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") - x.line("} ") -} - -// -------- - -type genV struct { - // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice - MapKey string - Elem string - Primitive string - Size int -} - -func (x *genRunner) newGenV(t reflect.Type) (v genV) { - switch t.Kind() { - case reflect.Slice, reflect.Array: - te := t.Elem() - v.Elem = x.genTypeName(te) - v.Size = int(te.Size()) - case reflect.Map: - te, tk := t.Elem(), t.Key() - v.Elem = x.genTypeName(te) - v.MapKey = x.genTypeName(tk) - v.Size = int(te.Size() + tk.Size()) - default: - panic("unexpected type for newGenV. Requires map or slice type") - } - return -} - -func (x *genV) MethodNamePfx(prefix string, prim bool) string { - var name []byte - if prefix != "" { - name = append(name, prefix...) - } - if prim { - name = append(name, genTitleCaseName(x.Primitive)...) - } else { - if x.MapKey == "" { - name = append(name, "Slice"...) - } else { - name = append(name, "Map"...) - name = append(name, genTitleCaseName(x.MapKey)...) - } - name = append(name, genTitleCaseName(x.Elem)...) - } - return string(name) - -} - -// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. -// -// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, -// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. -// We strip it here. -func genImportPath(t reflect.Type) (s string) { - s = t.PkgPath() - if genCheckVendor { - // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. - // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
- const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } - } - return -} - -// A go identifier is (letter|_)[letter|number|_]* -func genGoIdentifier(s string, checkFirstChar bool) string { - b := make([]byte, 0, len(s)) - t := make([]byte, 4) - var n int - for i, r := range s { - if checkFirstChar && i == 0 && !unicode.IsLetter(r) { - b = append(b, '_') - } - // r must be unicode_letter, unicode_digit or _ - if unicode.IsLetter(r) || unicode.IsDigit(r) { - n = utf8.EncodeRune(t, r) - b = append(b, t[:n]...) - } else { - b = append(b, '_') - } - } - return string(b) -} - -func genNonPtr(t reflect.Type) reflect.Type { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func genTitleCaseName(s string) string { - switch s { - case "interface{}", "interface {}": - return "Intf" - default: - return strings.ToUpper(s[0:1]) + s[1:] - } -} - -func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "Ptrto" - t = t.Elem() - } - tstr := t.String() - if tn := t.Name(); tn != "" { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - return ptrPfx + tn - } else { - if genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) - case reflect.Slice: - return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) - case reflect.Array: - return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) - case reflect.Chan: - var cx string - switch t.ChanDir() { - case reflect.SendDir: - cx = "ChanSend" - case reflect.RecvDir: - cx = "ChanRecv" - default: - cx = "Chan" - } - return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) - default: - if t == intfTyp { - return ptrPfx + "Interface" - } else { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - if t.Name() != "" { - return ptrPfx + t.Name() - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } else { - // best way to get the package name inclusive - // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) - if t.Name() != "" && genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - } -} - -// genCustomNameForType base64encodes the t.String() value in such a way -// that it can be used within a function name. 
-func genCustomTypeName(tstr string) string { - len2 := genBase64enc.EncodedLen(len(tstr)) - bufx := make([]byte, len2) - genBase64enc.Encode(bufx, []byte(tstr)) - for i := len2 - 1; i >= 0; i-- { - if bufx[i] == '=' { - len2-- - } else { - break - } - } - return string(bufx[:len2]) -} - -func genIsImmutable(t reflect.Type) (v bool) { - return isImmutableKind(t.Kind()) -} - -type genInternal struct { - Values []genV - Unsafe bool -} - -func (x genInternal) FastpathLen() (l int) { - for _, v := range x.Values { - if v.Primitive == "" { - l++ - } - } - return -} - -func genInternalZeroValue(s string) string { - switch s { - case "interface{}", "interface {}": - return "nil" - case "bool": - return "false" - case "string": - return `""` - default: - return "0" - } -} - -func genInternalEncCommandAsString(s string, vname string) string { - switch s { - case "uint", "uint8", "uint16", "uint32", "uint64": - return "ee.EncodeUint(uint64(" + vname + "))" - case "int", "int8", "int16", "int32", "int64": - return "ee.EncodeInt(int64(" + vname + "))" - case "string": - return "ee.EncodeString(c_UTF8, " + vname + ")" - case "float32": - return "ee.EncodeFloat32(" + vname + ")" - case "float64": - return "ee.EncodeFloat64(" + vname + ")" - case "bool": - return "ee.EncodeBool(" + vname + ")" - case "symbol": - return "ee.EncodeSymbol(" + vname + ")" - default: - return "e.encode(" + vname + ")" - } -} - -func genInternalDecCommandAsString(s string) string { - switch s { - case "uint": - return "uint(dd.DecodeUint(uintBitsize))" - case "uint8": - return "uint8(dd.DecodeUint(8))" - case "uint16": - return "uint16(dd.DecodeUint(16))" - case "uint32": - return "uint32(dd.DecodeUint(32))" - case "uint64": - return "dd.DecodeUint(64)" - case "uintptr": - return "uintptr(dd.DecodeUint(uintBitsize))" - case "int": - return "int(dd.DecodeInt(intBitsize))" - case "int8": - return "int8(dd.DecodeInt(8))" - case "int16": - return "int16(dd.DecodeInt(16))" - case "int32": - return "int32(dd.DecodeInt(32))" - case "int64": - return "dd.DecodeInt(64)" - - case "string": - return "dd.DecodeString()" - case "float32": - return "float32(dd.DecodeFloat(true))" - case "float64": - return "dd.DecodeFloat(false)" - case "bool": - return "dd.DecodeBool()" - default: - panic(errors.New("gen internal: unknown type for decode: " + s)) - } -} - -func genInternalSortType(s string, elem bool) string { - for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { - if strings.HasPrefix(s, v) { - if elem { - if v == "int" || v == "uint" || v == "float" { - return v + "64" - } else { - return v - } - } - return v + "Slice" - } - } - panic("sorttype: unexpected type: " + s) -} - -// var genInternalMu sync.Mutex -var genInternalV genInternal -var genInternalTmplFuncs template.FuncMap -var genInternalOnce sync.Once - -func genInternalInit() { - types := [...]string{ - "interface{}", - "string", - "float32", - "float64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "bool", - } - // keep as slice, so it is in specific iteration order. 
- // Initial order was uint64, string, interface{}, int, int64 - mapvaltypes := [...]string{ - "interface{}", - "string", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "float32", - "float64", - "bool", - } - wordSizeBytes := int(intBitsize) / 8 - - mapvaltypes2 := map[string]int{ - "interface{}": 2 * wordSizeBytes, - "string": 2 * wordSizeBytes, - "uint": 1 * wordSizeBytes, - "uint8": 1, - "uint16": 2, - "uint32": 4, - "uint64": 8, - "uintptr": 1 * wordSizeBytes, - "int": 1 * wordSizeBytes, - "int8": 1, - "int16": 2, - "int32": 4, - "int64": 8, - "float32": 4, - "float64": 8, - "bool": 1, - } - var gt genInternal - - // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function - for _, s := range types { - gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) - if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. - gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) - } - if _, ok := mapvaltypes2[s]; !ok { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) - } - for _, ms := range mapvaltypes { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) - } - } - - funcs := make(template.FuncMap) - // funcs["haspfx"] = strings.HasPrefix - funcs["encmd"] = genInternalEncCommandAsString - funcs["decmd"] = genInternalDecCommandAsString - funcs["zerocmd"] = genInternalZeroValue - funcs["hasprefix"] = strings.HasPrefix - funcs["sorttype"] = genInternalSortType - - genInternalV = gt - genInternalTmplFuncs = funcs -} - -// genInternalGoFile is used to generate source files from templates. -// It is run by the program author alone. -// Unfortunately, it has to be exported so that it can be called from a command line tool. -// *** DO NOT USE *** -func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) { - genInternalOnce.Do(genInternalInit) - - gt := genInternalV - gt.Unsafe = !safe - - t := template.New("").Funcs(genInternalTmplFuncs) - - tmplstr, err := ioutil.ReadAll(r) - if err != nil { - return - } - - if t, err = t.Parse(string(tmplstr)); err != nil { - return - } - - var out bytes.Buffer - err = t.Execute(&out, gt) - if err != nil { - return - } - - bout, err := format.Source(out.Bytes()) - if err != nil { - w.Write(out.Bytes()) // write out if error, so we can still see. - // w.Write(bout) // write out if error, as much as possible, so we can still see. - return - } - w.Write(bout) - return -} diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/gen_15.go deleted file mode 100644 index ab76c3102..000000000 --- a/vendor/github.com/ugorji/go/codec/gen_15.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5,!go1.6 - -package codec - -import "os" - -func init() { - genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" -} diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/gen_16.go deleted file mode 100644 index 87c04e2e1..000000000 --- a/vendor/github.com/ugorji/go/codec/gen_16.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -// +build go1.6 - -package codec - -import "os" - -func init() { - genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" -} diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/gen_17.go deleted file mode 100644 index 3881a43ce..000000000 --- a/vendor/github.com/ugorji/go/codec/gen_17.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.7 - -package codec - -func init() { - genCheckVendor = true -} diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go deleted file mode 100644 index 8b94fc1e4..000000000 --- a/vendor/github.com/ugorji/go/codec/helper.go +++ /dev/null @@ -1,1314 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. - -// Some shared ideas around encoding/decoding -// ------------------------------------------ -// -// If an interface{} is passed, we first do a type assertion to see if it is -// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. -// -// If we start with a reflect.Value, we are already in reflect.Value land and -// will try to grab the function for the underlying Type and directly call that function. -// This is more performant than calling reflect.Value.Interface(). -// -// This still helps us bypass many layers of reflection, and give best performance. -// -// Containers -// ------------ -// Containers in the stream are either associative arrays (key-value pairs) or -// regular arrays (indexed by incrementing integers). -// -// Some streams support indefinite-length containers, and use a breaking -// byte-sequence to denote that the container has come to an end. -// -// Some streams also are text-based, and use explicit separators to denote the -// end/beginning of different values. -// -// During encode, we use a high-level condition to determine how to iterate through -// the container. That decision is based on whether the container is text-based (with -// separators) or binary (without separators). If binary, we do not even call the -// encoding of separators. -// -// During decode, we use a different high-level condition to determine how to iterate -// through the containers. That decision is based on whether the stream contained -// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that -// it has to be binary, and we do not even try to read separators. -// -// Philosophy -// ------------ -// On decode, this codec will update containers appropriately: -// - If struct, update fields from stream into fields of struct. -// If field in stream not found in struct, handle appropriately (based on option). -// If a struct field has no corresponding value in the stream, leave it AS IS. -// If nil in stream, set value to nil/zero value. -// - If map, update map from stream. -// If the stream value is NIL, set the map to nil. -// - if slice, try to update up to length of array in stream. -// if container len is less than stream array length, -// and container cannot be expanded, handled (based on option). -// This means you can decode 4-element stream array into 1-element array. -// -// ------------------------------------ -// On encode, user can specify omitEmpty. 
This means that the value will be omitted -// if the zero value. The problem may occur during decode, where omitted values do not affect -// the value being decoded into. This means that if decoding into a struct with an -// int field with current value=5, and the field is omitted in the stream, then after -// decoding, the value will still be 5 (not 0). -// omitEmpty only works if you guarantee that you always decode into zero-values. -// -// ------------------------------------ -// We could have truncated a map to remove keys not available in the stream, -// or set values in the struct which are not in the stream to their zero values. -// We decided against it because there is no efficient way to do it. -// We may introduce it as an option later. -// However, that will require enabling it for both runtime and code generation modes. -// -// To support truncate, we need to do 2 passes over the container: -// map -// - first collect all keys (e.g. in k1) -// - for each key in stream, mark k1 that the key should not be removed -// - after updating map, do second pass and call delete for all keys in k1 which are not marked -// struct: -// - for each field, track the *typeInfo s1 -// - iterate through all s1, and for each one not marked, set value to zero -// - this involves checking the possible anonymous fields which are nil ptrs. -// too much work. -// -// ------------------------------------------ -// Error Handling is done within the library using panic. -// -// This way, the code doesn't have to keep checking if an error has happened, -// and we don't have to keep sending the error value along with each call -// or storing it in the En|Decoder and checking it constantly along the way. -// -// The disadvantage is that small functions which use panics cannot be inlined. -// The code accounts for that by only using panics behind an interface; -// since interface calls cannot be inlined, this is irrelevant. -// -// We considered storing the error is En|Decoder. -// - once it has its err field set, it cannot be used again. -// - panicing will be optional, controlled by const flag. -// - code should always check error first and return early. -// We eventually decided against it as it makes the code clumsier to always -// check for these error conditions. - -import ( - "bytes" - "encoding" - "encoding/binary" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -const ( - scratchByteArrayLen = 32 - initCollectionCap = 32 // 32 is defensive. 16 is preferred. - - // Support encoding.(Binary|Text)(Unm|M)arshaler. - // This constant flag will enable or disable it. - supportMarshalInterfaces = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - // - // From benchmarks, slices with linear search perform better with < 32 entries. - // We have typically seen a high threshold of about 24 entries. - useMapForCodecCache = false - - // for debugging, set this to false, to catch panic traces. - // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. - recoverPanicToErr = true - - // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. 
- // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. - // The chances of this are slim, so leave this "optimization". - // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value? - resetSliceElemToZeroValue bool = false -) - -var ( - oneByteArr = [1]byte{0} - zeroByteSlice = oneByteArr[:0:0] -) - -type charEncoding uint8 - -const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE -) - -// valueType is the stream type -type valueType uint8 - -const ( - valueTypeUnset valueType = iota - valueTypeNil - valueTypeInt - valueTypeUint - valueTypeFloat - valueTypeBool - valueTypeString - valueTypeSymbol - valueTypeBytes - valueTypeMap - valueTypeArray - valueTypeTimestamp - valueTypeExt - - // valueTypeInvalid = 0xff -) - -type seqType uint8 - -const ( - _ seqType = iota - seqTypeArray - seqTypeSlice - seqTypeChan -) - -// note that containerMapStart and containerArraySend are not sent. -// This is because the ReadXXXStart and EncodeXXXStart already does these. -type containerState uint8 - -const ( - _ containerState = iota - - containerMapStart // slot left open, since Driver method already covers it - containerMapKey - containerMapValue - containerMapEnd - containerArrayStart // slot left open, since Driver methods already cover it - containerArrayElem - containerArrayEnd -) - -// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo -type sfiIdx struct { - name string - index int -} - -// do not recurse if a containing type refers to an embedded type -// which refers back to its containing type (via a pointer). -// The second time this back-reference happens, break out, -// so as not to cause an infinite loop. -const rgetMaxRecursion = 2 - -// Anecdotally, we believe most types have <= 12 fields. -// Java's PMD rules set TooManyFields threshold to 15. 
-const rgetPoolTArrayLen = 12 - -type rgetT struct { - fNames []string - encNames []string - etypes []uintptr - sfis []*structFieldInfo -} - -type rgetPoolT struct { - fNames [rgetPoolTArrayLen]string - encNames [rgetPoolTArrayLen]string - etypes [rgetPoolTArrayLen]uintptr - sfis [rgetPoolTArrayLen]*structFieldInfo - sfiidx [rgetPoolTArrayLen]sfiIdx -} - -var rgetPool = sync.Pool{ - New: func() interface{} { return new(rgetPoolT) }, -} - -type containerStateRecv interface { - sendContainerState(containerState) -} - -// mirror json.Marshaler and json.Unmarshaler here, -// so we don't import the encoding/json package -type jsonMarshaler interface { - MarshalJSON() ([]byte, error) -} -type jsonUnmarshaler interface { - UnmarshalJSON([]byte) error -} - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - rawTyp = reflect.TypeOf(Raw{}) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - - binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() - - textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - - jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() - jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() - - selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - rawTypId = reflect.ValueOf(rawTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - stringTypId = reflect.ValueOf(stringTyp).Pointer() - - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - chkOvf checkOverflow - - noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") -) - -var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) - -// Selfer defines methods by which a value can encode or decode itself. -// -// Any type which implements Selfer will be able to encode or decode itself. -// Consequently, during (en|de)code, this takes precedence over -// (text|binary)(M|Unm)arshal or extension support. -type Selfer interface { - CodecEncodeSelf(*Encoder) - CodecDecodeSelf(*Decoder) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -// This affords storing a map in a specific sequence in the stream. 
-// -// The support of MapBySlice affords the following: -// - A slice type which implements MapBySlice will be encoded as a map -// - A slice can be decoded from a map in the stream -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - // TypeInfos is used to get the type info for any type. - // - // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json - TypeInfos *TypeInfos - - extHandle - EncodeOptions - DecodeOptions -} - -func (x *BasicHandle) getBasicHandle() *BasicHandle { - return x -} - -func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - if x.TypeInfos != nil { - return x.TypeInfos.get(rtid, rt) - } - return defTypeInfos.get(rtid, rt) -} - -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - getBasicHandle() *BasicHandle - newEncDriver(w *Encoder) encDriver - newDecDriver(r *Decoder) decDriver - isBinary() bool -} - -// Raw represents raw formatted bytes. -// We "blindly" store it during encode and store the raw bytes during decode. -// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. -type Raw []byte - -// RawExt represents raw unprocessed extension data. -// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. -// -// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. -type RawExt struct { - Tag uint64 - // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. - // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types - Data []byte - // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. - Value interface{} -} - -// BytesExt handles custom (de)serialization of types to/from []byte. -// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. -type BytesExt interface { - // WriteExt converts a value to a []byte. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - WriteExt(v interface{}) []byte - - // ReadExt updates a value from a []byte. - ReadExt(dst interface{}, src []byte) -} - -// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. -// The Encoder or Decoder will then handle the further (de)serialization of that known type. -// -// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. -type InterfaceExt interface { - // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - ConvertExt(v interface{}) interface{} - - // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. - UpdateExt(dst interface{}, src interface{}) -} - -// Ext handles custom (de)serialization of custom types / extensions. 
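A minimal sketch of the MapBySlice contract described above, assuming the package's exported NewEncoderBytes constructor and the JsonHandle declared later in this file; the type name is illustrative:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    // pairs is a slice that asks to be encoded as a map: it must hold an even
    // number of elements, alternating key, value, in the order they should appear.
    type pairs []interface{}

    func (pairs) MapBySlice() {}

    func main() {
        var out []byte
        if err := codec.NewEncoderBytes(&out, &codec.JsonHandle{}).Encode(pairs{"a", 1, "b", 2}); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // expected: {"a":1,"b":2}
    }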
-type Ext interface { - BytesExt - InterfaceExt -} - -// addExtWrapper is a wrapper implementation to support former AddExt exported method. -type addExtWrapper struct { - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} - -func (x addExtWrapper) WriteExt(v interface{}) []byte { - bs, err := x.encFn(reflect.ValueOf(v)) - if err != nil { - panic(err) - } - return bs -} - -func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { - if err := x.decFn(reflect.ValueOf(v), bs); err != nil { - panic(err) - } -} - -func (x addExtWrapper) ConvertExt(v interface{}) interface{} { - return x.WriteExt(v) -} - -func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { - x.ReadExt(dest, v.([]byte)) -} - -type setExtWrapper struct { - b BytesExt - i InterfaceExt -} - -func (x *setExtWrapper) WriteExt(v interface{}) []byte { - if x.b == nil { - panic("BytesExt.WriteExt is not supported") - } - return x.b.WriteExt(v) -} - -func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { - if x.b == nil { - panic("BytesExt.WriteExt is not supported") - - } - x.b.ReadExt(v, bs) -} - -func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { - if x.i == nil { - panic("InterfaceExt.ConvertExt is not supported") - - } - return x.i.ConvertExt(v) -} - -func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { - if x.i == nil { - panic("InterfaceExxt.UpdateExt is not supported") - - } - x.i.UpdateExt(dest, v) -} - -// type errorString string -// func (x errorString) Error() string { return string(x) } - -type binaryEncodingType struct{} - -func (_ binaryEncodingType) isBinary() bool { return true } - -type textEncodingType struct{} - -func (_ textEncodingType) isBinary() bool { return false } - -// noBuiltInTypes is embedded into many types which do not support builtins -// e.g. msgpack, simple, cbor. -type noBuiltInTypes struct{} - -func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false } -func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} -func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} - -type noStreamingCodec struct{} - -func (_ noStreamingCodec) CheckBreak() bool { return false } - -// bigenHelper. -// Users must already slice the x completely, because we will not reslice. -type bigenHelper struct { - x []byte // must be correctly sliced to appropriate len. slicing is a cost. - w encWriter -} - -func (z bigenHelper) writeUint16(v uint16) { - bigen.PutUint16(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint32(v uint32) { - bigen.PutUint32(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint64(v uint64) { - bigen.PutUint64(z.x, v) - z.w.writeb(z.x) -} - -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag uint64 - ext Ext -} - -type extHandle []extTypeTagFn - -// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. -// -// AddExt registes an encode and decode function for a reflect.Type. -// AddExt internally calls SetExt. -// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. -func (o *extHandle) AddExt( - rt reflect.Type, tag byte, - encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, -) (err error) { - if encfn == nil || decfn == nil { - return o.SetExt(rt, uint64(tag), nil) - } - return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) -} - -// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. 
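A sketch of wiring a custom type through the InterfaceExt path above, using the SetInterfaceExt method that JsonHandle declares later in this file; the Epoch type and tag value are made up for illustration:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/ugorji/go/codec"
    )

    // Epoch is a hypothetical wrapper we want encoded as a plain integer.
    type Epoch struct{ Sec int64 }

    // epochExt implements InterfaceExt: ConvertExt simplifies the value for
    // encoding, UpdateExt rebuilds it on decode. Per the docs above, ConvertExt
    // may receive either the value or a pointer to it.
    type epochExt struct{}

    func (epochExt) ConvertExt(v interface{}) interface{} {
        switch t := v.(type) {
        case Epoch:
            return t.Sec
        case *Epoch:
            return t.Sec
        default:
            panic(fmt.Sprintf("unsupported type %T", v))
        }
    }

    func (epochExt) UpdateExt(dst interface{}, src interface{}) {
        ep := dst.(*Epoch)
        switch t := src.(type) {
        case int64:
            ep.Sec = t
        case uint64:
            ep.Sec = int64(t)
        }
    }

    func main() {
        var jh codec.JsonHandle
        if err := jh.SetInterfaceExt(reflect.TypeOf(Epoch{}), 1, epochExt{}); err != nil {
            panic(err)
        }
        var out []byte
        if err := codec.NewEncoderBytes(&out, &jh).Encode(Epoch{Sec: 42}); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // expected: 42
    }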
-// -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call SetExt with nil Ext -func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.ext = tag, ext - return - } - } - - if *o == nil { - *o = make([]extTypeTagFn, 0, 4) - } - *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.tag == tag { - return v - } - } - return nil -} - -type structFieldInfo struct { - encName string // encode name - fieldName string // field name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? -} - -// func (si *structFieldInfo) isZero() bool { -// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray -// } - -// rv returns the field of the struct. -// If anonymous, it returns an Invalid -func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - return v - } - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - if !update { - return - } - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.Field(x) - } - return v -} - -func (si *structFieldInfo) setToZeroValue(v reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - v.Set(reflect.Zero(v.Type())) - // v.Set(reflect.New(v.Type()).Elem()) - // v.Set(reflect.New(v.Type())) - } else { - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return - } - v = v.Elem() - } - v = v.Field(x) - } - v.Set(reflect.Zero(v.Type())) - } -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - // if fname == "" { - // panic(noFieldNameToStructFieldInfoErr) - // } - si := structFieldInfo{ - encName: fname, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - if s == "omitempty" { - si.omitEmpty = true - } else if s == "toarray" { - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. 
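The tag grammar parsed by parseStructFieldInfo above, shown on a concrete struct (field names are illustrative). The first tag element renames the field; "omitempty" and "toarray" are the only options recognized; a field named _struct carries struct-wide options:

    package model

    type Point struct {
        // toarray on _struct asks for the struct to be encoded as an array;
        // omitempty on _struct would apply omitempty to every field.
        _struct bool    `codec:",toarray"`
        X       int     `codec:"x"`
        Y       int     `codec:"y,omitempty"`
        Label   string  `codec:"-"` // tag "-": skipped entirely
        hidden  float64 // unexported: also skipped
    }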
-// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - rt reflect.Type - rtid uintptr - - numMeth uint16 // number of methods - - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - bm bool // base type (T or *T) is a binaryMarshaler - bunm bool // base type (T or *T) is a binaryUnmarshaler - bmIndir int8 // number of indirections to get to binaryMarshaler type - bunmIndir int8 // number of indirections to get to binaryUnmarshaler type - - tm bool // base type (T or *T) is a textMarshaler - tunm bool // base type (T or *T) is a textUnmarshaler - tmIndir int8 // number of indirections to get to textMarshaler type - tunmIndir int8 // number of indirections to get to textUnmarshaler type - - jm bool // base type (T or *T) is a jsonMarshaler - junm bool // base type (T or *T) is a jsonUnmarshaler - jmIndir int8 // number of indirections to get to jsonMarshaler type - junmIndir int8 // number of indirections to get to jsonUnmarshaler type - - cs bool // base type (T or *T) is a Selfer - csIndir int8 // number of indirections to get to Selfer type - - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - // NOTE: name may be a stringView, so don't pass it to another function. - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 -} - -// TypeInfos caches typeInfo for each type on first inspection. -// -// It is configured with a set of tag keys, which are used to get -// configuration for the type. -type TypeInfos struct { - infos map[uintptr]*typeInfo - mu sync.RWMutex - tags []string -} - -// NewTypeInfos creates a TypeInfos given a set of struct tags keys. -// -// This allows users customize the struct tag keys which contain configuration -// of their types. -func NewTypeInfos(tags []string) *TypeInfos { - return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)} -} - -func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { - // check for tags: codec, json, in that order. - // this allows seamless support for many configured structs. 
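A sketch of pointing a handle at a custom TypeInfos so a different tag key is consulted first; NewTypeInfos and the TypeInfos field come from this file, while the "mycodec" tag name is made up:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    type item struct {
        Name string `mycodec:"n"`
    }

    func main() {
        var jh codec.JsonHandle
        // Consult the `mycodec` tag first, then fall back to the defaults.
        jh.TypeInfos = codec.NewTypeInfos([]string{"mycodec", "codec", "json"})

        var out []byte
        if err := codec.NewEncoderBytes(&out, &jh).Encode(item{Name: "x"}); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // expected: {"n":"x"}
    }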
- for _, x := range x.tags { - s = t.Get(x) - if s != "" { - return s - } - } - return -} - -func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - x.mu.RLock() - pti, ok = x.infos[rtid] - x.mu.RUnlock() - if ok { - return - } - - // do not hold lock while computing this. - // it may lead to duplication, but that's ok. - ti := typeInfo{rt: rt, rtid: rtid} - ti.numMeth = uint16(rt.NumMethod()) - - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.bm, ti.bmIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.bunm, ti.bunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { - ti.tm, ti.tmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { - ti.tunm, ti.tunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { - ti.jm, ti.jmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { - ti.junm, ti.junmIndir = true, indir - } - if ok, indir = implementsIntf(rt, selferTyp); ok { - ti.cs, ti.csIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var omitEmpty bool - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) - ti.toArray = siInfo.toArray - omitEmpty = siInfo.omitEmpty - } - pi := rgetPool.Get() - pv := pi.(*rgetPoolT) - pv.etypes[0] = ti.baseId - vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} - x.rget(rt, rtid, omitEmpty, nil, &vv) - ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) - rgetPool.Put(pi) - } - // sfi = sfip - - x.mu.Lock() - if pti, ok = x.infos[rtid]; !ok { - pti = &ti - x.infos[rtid] = pti - } - x.mu.Unlock() - return -} - -func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, - indexstack []int, pv *rgetT, -) { - // Read up fields and store how to access the value. - // - // It uses go's rules for message selectors, - // which say that the field with the shallowest depth is selected. - // - // Note: we consciously use slices, not a map, to simulate a set. - // Typically, types have < 16 fields, - // and iteration using equals is faster than maps there - -LOOP: - for j, jlen := 0, rt.NumField(); j < jlen; j++ { - f := rt.Field(j) - fkind := f.Type.Kind() - // skip if a func type, or is unexported, or structTag value == "-" - switch fkind { - case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: - continue LOOP - } - - // if r1, _ := utf8.DecodeRuneInString(f.Name); - // r1 == utf8.RuneError || !unicode.IsUpper(r1) { - if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded - continue - } - stag := x.structTag(f.Tag) - if stag == "-" { - continue - } - var si *structFieldInfo - // if anonymous and no struct tag (or it's blank), - // and a struct (or pointer to struct), inline it. 
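The caching discipline used by get above (read under RLock, compute outside the lock, re-check under Lock before inserting) is a general pattern; a stripped-down sketch with a hypothetical string-keyed cache:

    package main

    import (
        "fmt"
        "sync"
    )

    type cache struct {
        mu sync.RWMutex
        m  map[string]int
    }

    // get tolerates duplicate computation: two goroutines may both compute the
    // value, but only the first insert wins and all callers see one consistent copy.
    func (c *cache) get(k string, compute func(string) int) int {
        c.mu.RLock()
        v, ok := c.m[k]
        c.mu.RUnlock()
        if ok {
            return v
        }
        v = compute(k) // possibly duplicated work; no lock held here

        c.mu.Lock()
        if v2, ok := c.m[k]; ok {
            v = v2 // someone beat us to it; keep the cached copy
        } else {
            c.m[k] = v
        }
        c.mu.Unlock()
        return v
    }

    func main() {
        c := &cache{m: make(map[string]int)}
        fmt.Println(c.get("alpha", func(s string) int { return len(s) }))
    }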
- if f.Anonymous && fkind != reflect.Interface { - doInline := stag == "" - if !doInline { - si = parseStructFieldInfo("", stag) - doInline = si.encName == "" - // doInline = si.isZero() - } - if doInline { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - // if etypes contains this, don't call rget again (as fields are already seen here) - ftid := reflect.ValueOf(ft).Pointer() - // We cannot recurse forever, but we need to track other field depths. - // So - we break if we see a type twice (not the first time). - // This should be sufficient to handle an embedded type that refers to its - // owning type, which then refers to its embedded type. - processIt := true - numk := 0 - for _, k := range pv.etypes { - if k == ftid { - numk++ - if numk == rgetMaxRecursion { - processIt = false - break - } - } - } - if processIt { - pv.etypes = append(pv.etypes, ftid) - indexstack2 := make([]int, len(indexstack)+1) - copy(indexstack2, indexstack) - indexstack2[len(indexstack)] = j - // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - x.rget(ft, ftid, omitEmpty, indexstack2, pv) - } - continue - } - } - } - - // after the anonymous dance: if an unexported field, skip - if f.PkgPath != "" { // unexported - continue - } - - if f.Name == "" { - panic(noFieldNameToStructFieldInfoErr) - } - - pv.fNames = append(pv.fNames, f.Name) - - if si == nil { - si = parseStructFieldInfo(f.Name, stag) - } else if si.encName == "" { - si.encName = f.Name - } - si.fieldName = f.Name - - pv.encNames = append(pv.encNames, si.encName) - - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = make([]int, len(indexstack)+1) - copy(si.is, indexstack) - si.is[len(indexstack)] = j - // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if omitEmpty { - si.omitEmpty = true - } - pv.sfis = append(pv.sfis, si) - } -} - -// resolves the struct field info got from a call to rget. -// Returns a trimmed, unsorted and sorted []*structFieldInfo. -func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { - var n int - for i, v := range x { - xn := v.encName //TODO: fieldName or encName? use encName for now. 
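The depth rule that rget and rgetResolveSFI implement (the shallower field wins, following Go's own selector rules), shown on a concrete shape with illustrative names:

    package model

    type Base struct {
        ID   int    `codec:"id"`
        Name string `codec:"name"`
    }

    // Item embeds Base, so Base's fields are inlined at depth 1.
    // Item.Name sits at depth 0 and therefore shadows Base.Name:
    // only one "name" entry survives field resolution, the shallower one.
    type Item struct {
        Base
        Name string `codec:"name"`
    }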
- var found bool - for j, k := range pv { - if k.name == xn { - // one of them must be reset to nil, and the index updated appropriately to the other one - if len(v.is) == len(x[k.index].is) { - } else if len(v.is) < len(x[k.index].is) { - pv[j].index = i - if x[k.index] != nil { - x[k.index] = nil - n++ - } - } else { - if x[i] != nil { - x[i] = nil - n++ - } - } - found = true - break - } - } - if !found { - pv = append(pv, sfiIdx{xn, i}) - } - } - - // remove all the nils - y = make([]*structFieldInfo, len(x)-n) - n = 0 - for _, v := range x { - if v == nil { - continue - } - y[n] = v - n++ - } - - z = make([]*structFieldInfo, len(y)) - copy(z, y) - sort.Sort(sfiSortedByEncName(z)) - return -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } -} - -// func doPanic(tag string, format string, params ...interface{}) { -// params2 := make([]interface{}, len(params)+1) -// params2[0] = tag -// copy(params2[1:], params) -// panic(fmt.Errorf("%s: "+format, params2...)) -// } - -func isImmutableKind(k reflect.Kind) (v bool) { - return false || - k == reflect.Int || - k == reflect.Int8 || - k == reflect.Int16 || - k == reflect.Int32 || - k == reflect.Int64 || - k == reflect.Uint || - k == reflect.Uint8 || - k == reflect.Uint16 || - k == reflect.Uint32 || - k == reflect.Uint64 || - k == reflect.Uintptr || - k == reflect.Float32 || - k == reflect.Float64 || - k == reflect.Bool || - k == reflect.String -} - -// these functions must be inlinable, and not call anybody -type checkOverflow struct{} - -func (_ checkOverflow) Float32(f float64) (overflow bool) { - if f < 0 { - f = -f - } - return math.MaxFloat32 < f && f <= math.MaxFloat64 -} - -func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { - //e.g. 
-127 to 128 for int8 - pos := (v >> 63) == 0 - ui2 := v & 0x7fffffffffffffff - if pos { - if ui2 > math.MaxInt64 { - overflow = true - return - } - } else { - if ui2 > math.MaxInt64-1 { - overflow = true - return - } - } - i = int64(v) - return -} - -// ------------------ SORT ----------------- - -func isNaN(f float64) bool { return f != f } - -// ----------------------- - -type intSlice []int64 -type uintSlice []uint64 -type floatSlice []float64 -type boolSlice []bool -type stringSlice []string -type bytesSlice [][]byte - -func (p intSlice) Len() int { return len(p) } -func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintSlice) Len() int { return len(p) } -func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatSlice) Len() int { return len(p) } -func (p floatSlice) Less(i, j int) bool { - return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) -} -func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringSlice) Len() int { return len(p) } -func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesSlice) Len() int { return len(p) } -func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } -func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolSlice) Len() int { return len(p) } -func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } -func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// --------------------- - -type intRv struct { - v int64 - r reflect.Value -} -type intRvSlice []intRv -type uintRv struct { - v uint64 - r reflect.Value -} -type uintRvSlice []uintRv -type floatRv struct { - v float64 - r reflect.Value -} -type floatRvSlice []floatRv -type boolRv struct { - v bool - r reflect.Value -} -type boolRvSlice []boolRv -type stringRv struct { - v string - r reflect.Value -} -type stringRvSlice []stringRv -type bytesRv struct { - v []byte - r reflect.Value -} -type bytesRvSlice []bytesRv - -func (p intRvSlice) Len() int { return len(p) } -func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintRvSlice) Len() int { return len(p) } -func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatRvSlice) Len() int { return len(p) } -func (p floatRvSlice) Less(i, j int) bool { - return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) -} -func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringRvSlice) Len() int { return len(p) } -func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesRvSlice) Len() int { return len(p) } -func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolRvSlice) Len() int { return len(p) } -func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } -func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type bytesI struct { - v []byte - i interface{} -} - -type bytesISlice []bytesI - -func (p bytesISlice) Len() int { return len(p) } -func (p bytesISlice) Less(i, j int) 
bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type set []uintptr - -func (s *set) add(v uintptr) (exists bool) { - // e.ci is always nil, or len >= 1 - // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() - x := *s - if x == nil { - x = make([]uintptr, 1, 8) - x[0] = v - *s = x - return - } - // typically, length will be 1. make this perform. - if len(x) == 1 { - if j := x[0]; j == 0 { - x[0] = v - } else if j == v { - exists = true - } else { - x = append(x, v) - *s = x - } - return - } - // check if it exists - for _, j := range x { - if j == v { - exists = true - return - } - } - // try to replace a "deleted" slot - for i, j := range x { - if j == 0 { - x[i] = v - return - } - } - // if unable to replace deleted slot, just append it. - x = append(x, v) - *s = x - return -} - -func (s *set) remove(v uintptr) (exists bool) { - // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() - x := *s - if len(x) == 0 { - return - } - if len(x) == 1 { - if x[0] == v { - x[0] = 0 - } - return - } - for i, j := range x { - if j == v { - exists = true - x[i] = 0 // set it to 0, as way to delete it. - // copy(x[i:], x[i+1:]) - // x = x[:len(x)-1] - return - } - } - return -} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go deleted file mode 100644 index 5d0727f77..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_internal.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). - -import ( - "errors" - "fmt" - "math" - "reflect" -) - -func panicValToErr(panicVal interface{}, err *error) { - if panicVal == nil { - return - } - // case nil - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - return -} - -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() - } - case reflect.Struct: - if !checkStruct { - return false - } - // return true if all fields are empty. else return false. - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. 
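panicToErr and panicValToErr above convert internal panics back into an error at the API boundary; a self-contained sketch of the same defer/recover shape:

    package main

    import (
        "errors"
        "fmt"
    )

    // do converts any panic raised by fn into the returned error,
    // mirroring the recover-to-error behaviour above.
    func do(fn func()) (err error) {
        defer func() {
            if x := recover(); x != nil {
                switch v := x.(type) {
                case error:
                    err = v
                case string:
                    err = errors.New(v)
                default:
                    err = fmt.Errorf("%v", x)
                }
            }
        }()
        fn()
        return
    }

    func main() {
        fmt.Println(do(func() { panic("bad input") })) // bad input
    }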
- // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - return hIsEmptyValue(v, deref, checkStruct) -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} - -// validate that this function is correct ... -// culled from OGRE (Object-Oriented Graphics Rendering Engine) -// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) -func halfFloatToFloatBits(yy uint16) (d uint32) { - y := uint32(yy) - s := (y >> 15) & 0x01 - e := (y >> 10) & 0x1f - m := y & 0x03ff - - if e == 0 { - if m == 0 { // plu or minus 0 - return s << 31 - } else { // Denormalized number -- renormalize it - for (m & 0x00000400) == 0 { - m <<= 1 - e -= 1 - } - e += 1 - const zz uint32 = 0x0400 - m &= ^zz - } - } else if e == 31 { - if m == 0 { // Inf - return (s << 31) | 0x7f800000 - } else { // NaN - return (s << 31) | 0x7f800000 | (m << 13) - } - } - e = e + (127 - 15) - m = m << 13 - return (s << 31) | (e << 23) | m -} - -// GrowCap will return a new capacity for a slice, given the following: -// - oldCap: current capacity -// - unit: in-memory size of an element -// - num: number of elements to add -func growCap(oldCap, unit, num int) (newCap int) { - // appendslice logic (if cap < 1024, *2, else *1.25): - // leads to many copy calls, especially when copying bytes. - // bytes.Buffer model (2*cap + n): much better for bytes. - // smarter way is to take the byte-size of the appended element(type) into account - - // maintain 3 thresholds: - // t1: if cap <= t1, newcap = 2x - // t2: if cap <= t2, newcap = 1.75x - // t3: if cap <= t3, newcap = 1.5x - // else newcap = 1.25x - // - // t1, t2, t3 >= 1024 always. - // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) - // - // With this, appending for bytes increase by: - // 100% up to 4K - // 75% up to 8K - // 50% up to 16K - // 25% beyond that - - // unit can be 0 e.g. 
for struct{}{}; handle that appropriately - var t1, t2, t3 int // thresholds - if unit <= 1 { - t1, t2, t3 = 4*1024, 8*1024, 16*1024 - } else if unit < 16 { - t3 = 16 / unit * 1024 - t1 = t3 * 1 / 4 - t2 = t3 * 2 / 4 - } else { - t1, t2, t3 = 1024, 1024, 1024 - } - - var x int // temporary variable - - // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively - if oldCap <= t1 { // [0,t1] - x = 8 - } else if oldCap > t3 { // (t3,infinity] - x = 5 - } else if oldCap <= t2 { // (t1,t2] - x = 7 - } else { // (t2,t3] - x = 6 - } - newCap = x * oldCap / 4 - - if num > 0 { - newCap += num - } - - // ensure newCap is a multiple of 64 (if it is > 64) or 16. - if newCap > 64 { - if x = newCap % 64; x != 0 { - x = newCap / 64 - newCap = 64 * (x + 1) - } - } else { - if x = newCap % 16; x != 0 { - x = newCap / 16 - newCap = 16 * (x + 1) - } - } - return -} - -func expandSliceValue(s reflect.Value, num int) reflect.Value { - if num <= 0 { - return s - } - l0 := s.Len() - l1 := l0 + num // new slice length - if l1 < l0 { - panic("ExpandSlice: slice overflow") - } - c0 := s.Cap() - if l1 <= c0 { - return s.Slice(0, l1) - } - st := s.Type() - c1 := growCap(c0, int(st.Elem().Size()), num) - s2 := reflect.MakeSlice(st, l1, c1) - // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) - reflect.Copy(s2, s) - return s2 -} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go deleted file mode 100644 index 8b06a0045..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !unsafe - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func stringView(v []byte) string { - return string(v) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func bytesView(v string) []byte { - return []byte(v) -} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go deleted file mode 100644 index 0f596c71a..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build unsafe - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "unsafe" -) - -// This file has unsafe variants of some helper methods. - -type unsafeString struct { - Data uintptr - Len int -} - -type unsafeSlice struct { - Data uintptr - Len int - Cap int -} - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func stringView(v []byte) string { - if len(v) == 0 { - return "" - } - - bx := (*unsafeSlice)(unsafe.Pointer(&v)) - sx := unsafeString{bx.Data, bx.Len} - return *(*string)(unsafe.Pointer(&sx)) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. 
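For byte slices (unit 1) the growCap policy above works out as follows; this sketch covers only that case, ignoring the num term and the small-capacity (<= 64) rounding:

    package main

    import "fmt"

    // grow1 restates the documented byte-slice policy: double up to 4K,
    // 1.75x up to 8K, 1.5x up to 16K, 1.25x beyond, then round up to a
    // multiple of 64.
    func grow1(oldCap int) int {
        var newCap int
        switch {
        case oldCap <= 4*1024:
            newCap = oldCap * 2
        case oldCap > 16*1024:
            newCap = oldCap * 5 / 4
        case oldCap <= 8*1024:
            newCap = oldCap * 7 / 4
        default:
            newCap = oldCap * 3 / 2
        }
        if newCap > 64 && newCap%64 != 0 {
            newCap = 64 * (newCap/64 + 1)
        }
        return newCap
    }

    func main() {
        fmt.Println(grow1(1000))  // 2048
        fmt.Println(grow1(10000)) // 15040
        fmt.Println(grow1(20000)) // 25024
    }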
-// In regular safe mode, it is an allocation and copy. -func bytesView(v string) []byte { - if len(v) == 0 { - return zeroByteSlice - } - - sx := (*unsafeString)(unsafe.Pointer(&v)) - bx := unsafeSlice{sx.Data, sx.Len, sx.Len} - return *(*[]byte)(unsafe.Pointer(&bx)) -} diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go deleted file mode 100644 index 5bb389628..000000000 --- a/vendor/github.com/ugorji/go/codec/json.go +++ /dev/null @@ -1,1234 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// By default, this json support uses base64 encoding for bytes, because you cannot -// store and read any arbitrary string in json (only unicode). -// However, the user can configre how to encode/decode bytes. -// -// This library specifically supports UTF-8 for encoding and decoding only. -// -// Note that the library will happily encode/decode things which are not valid -// json e.g. a map[int64]string. We do it for consistency. With valid json, -// we will encode and decode appropriately. -// Users can specify their map type if necessary to force it. -// -// Note: -// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. -// We implement it here. -// - Also, strconv.ParseXXX for floats and integers -// - only works on strings resulting in unnecessary allocation and []byte-string conversion. -// - it does a lot of redundant checks, because json numbers are simpler that what it supports. -// - We parse numbers (floats and integers) directly here. -// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. -// In that case, we delegate to strconv.ParseFloat. -// -// Note: -// - encode does not beautify. There is no whitespace when encoding. -// - rpc calls which take single integer arguments or write single numeric arguments will need care. - -// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver -// MUST not call one-another. - -import ( - "bytes" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "unicode/utf16" - "unicode/utf8" -) - -//-------------------------------- - -var ( - jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} - - jsonFloat64Pow10 = [...]float64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - 1e20, 1e21, 1e22, - } - - jsonUint64Pow10 = [...]uint64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - } - - // jsonTabs and jsonSpaces are used as caches for indents - jsonTabs, jsonSpaces string -) - -const ( - // jsonUnreadAfterDecNum controls whether we unread after decoding a number. - // - // instead of unreading, just update d.tok (iff it's not a whitespace char) - // However, doing this means that we may HOLD onto some data which belongs to another stream. - // Thus, it is safest to unread the data when done. - // keep behind a constant flag for now. - jsonUnreadAfterDecNum = true - - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: - // - If we see first character of null, false or true, - // do not validate subsequent characters. - // - e.g. if we see a n, assume null and skip next 3 characters, - // and do not validate they are ull. - // P.S. Do not expect a significant decoding boost from this. 
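The default []byte handling described in the json.go header above (base64 via StdEncoding unless RawBytesExt is configured); a quick sketch of what that looks like on the wire:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        var out []byte
        if err := codec.NewEncoderBytes(&out, &codec.JsonHandle{}).Encode([]byte{1, 2, 3}); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // expected: "AQID" (base64 of 0x01 0x02 0x03)

        var back []byte
        if err := codec.NewDecoderBytes(out, &codec.JsonHandle{}).Decode(&back); err != nil {
            panic(err)
        }
        fmt.Println(back) // expected: [1 2 3]
    }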
- jsonValidateSymbols = true - - // if jsonTruncateMantissa, truncate mantissa if trailing 0's. - // This is important because it could allow some floats to be decoded without - // deferring to strconv.ParseFloat. - jsonTruncateMantissa = true - - // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow - jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - - // if mantissa >= jsonNumUintMaxVal, this is an overflow - jsonNumUintMaxVal = 1< 1<<53 || v < -(1<<53)) { - e.w.writen1('"') - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) - e.w.writen1('"') - return - } - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) -} - -func (e *jsonEncDriver) EncodeUint(v uint64) { - if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { - e.w.writen1('"') - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) - e.w.writen1('"') - return - } - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) -} - -func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - if v := ext.ConvertExt(rv); v == nil { - e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - // only encodes re.Value (never re.Data) - if re.Value == nil { - e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *jsonEncDriver) EncodeArrayStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('[') - e.c = containerArrayStart -} - -func (e *jsonEncDriver) EncodeMapStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('{') - e.c = containerMapStart -} - -func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { - // e.w.writestr(strconv.Quote(v)) - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeSymbol(v string) { - // e.EncodeString(c_UTF8, v) - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - // if encoding raw bytes and RawBytesExt is configured, use it to encode - if c == c_RAW && e.se.i != nil { - e.EncodeExt(v, 0, &e.se, e.e) - return - } - if c == c_RAW { - slen := base64.StdEncoding.EncodedLen(len(v)) - if cap(e.bs) >= slen { - e.bs = e.bs[:slen] - } else { - e.bs = make([]byte, slen) - } - base64.StdEncoding.Encode(e.bs, v) - e.w.writen1('"') - e.w.writeb(e.bs) - e.w.writen1('"') - } else { - // e.EncodeString(c, string(v)) - e.quoteStr(stringView(v)) - } -} - -func (e *jsonEncDriver) EncodeAsis(v []byte) { - e.w.writeb(v) -} - -func (e *jsonEncDriver) quoteStr(s string) { - // adapted from std pkg encoding/json - const hex = "0123456789abcdef" - w := e.w - w.writen1('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - w.writestr(s[start:i]) - } - switch b { - case '\\', '"': - w.writen2('\\', b) - case '\n': - w.writen2('\\', 'n') - case '\r': - w.writen2('\\', 'r') - case '\b': - w.writen2('\\', 'b') - case '\f': - w.writen2('\\', 'f') - case '\t': - w.writen2('\\', 't') - default: - // encode all bytes < 0x20 (except \r, \n). - // also encode < > & to prevent security holes when served to some browsers. - w.writestr(`\u00`) - w.writen2(hex[b>>4], hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. 
U+2029 is PARAGRAPH SEPARATOR. - // Both technically valid JSON, but bomb on JSONP, so fix here. - if c == '\u2028' || c == '\u2029' { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\u202`) - w.writen1(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - w.writestr(s[start:]) - } - w.writen1('"') -} - -//-------------------------------- - -type jsonNum struct { - // bytes []byte // may have [+-.eE0-9] - mantissa uint64 // where mantissa ends, and maybe dot begins. - exponent int16 // exponent value. - manOverflow bool - neg bool // started with -. No initial sign in the bytes above. - dot bool // has dot - explicitExponent bool // explicit exponent -} - -func (x *jsonNum) reset() { - x.manOverflow = false - x.neg = false - x.dot = false - x.explicitExponent = false - x.mantissa = 0 - x.exponent = 0 -} - -// uintExp is called only if exponent > 0. -func (x *jsonNum) uintExp() (n uint64, overflow bool) { - n = x.mantissa - e := x.exponent - if e >= int16(len(jsonUint64Pow10)) { - overflow = true - return - } - n *= jsonUint64Pow10[e] - if n < x.mantissa || n > jsonNumUintMaxVal { - overflow = true - return - } - return - // for i := int16(0); i < e; i++ { - // if n >= jsonNumUintCutoff { - // overflow = true - // return - // } - // n *= 10 - // } - // return -} - -// these constants are only used withn floatVal. -// They are brought out, so that floatVal can be inlined. -const ( - jsonUint64MantissaBits = 52 - jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 -) - -func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { - // We do not want to lose precision. - // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: - // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) - // We expect up to 99.... (19 digits) - // - The mantissa cannot fit into a 52 bits of uint64 - // - The exponent is beyond our scope ie beyong 22. - parseUsingStrConv = x.manOverflow || - x.exponent > jsonMaxExponent || - (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || - x.mantissa>>jsonUint64MantissaBits != 0 - - if parseUsingStrConv { - return - } - - // all good. so handle parse here. - f = float64(x.mantissa) - // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) - if x.neg { - f = -f - } - if x.exponent > 0 { - f *= jsonFloat64Pow10[x.exponent] - } else if x.exponent < 0 { - f /= jsonFloat64Pow10[-x.exponent] - } - return -} - -type jsonDecDriver struct { - noBuiltInTypes - d *Decoder - h *JsonHandle - r decReader - - c containerState - // tok is used to store the token read right after skipWhiteSpace. - tok uint8 - - bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch, used for parsing strings or numbers - b2 [64]byte // scratch, used only for decodeBytes (after base64) - bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. - - se setExtWrapper - - n jsonNum -} - -func jsonIsWS(b byte) bool { - return b == ' ' || b == '\t' || b == '\r' || b == '\n' -} - -// // This will skip whitespace characters and return the next byte to read. -// // The next byte determines what the value will be one of. -// func (d *jsonDecDriver) skipWhitespace() { -// // fast-path: do not enter loop. Just check first (in case no whitespace). 
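quoteStr above escapes control characters plus <, >, & and the U+2028/U+2029 separators; a sketch of the effect (the output comment is the expected result per the rules above, not verified here):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        var out []byte
        if err := codec.NewEncoderBytes(&out, &codec.JsonHandle{}).Encode("<b>\u2028</b>"); err != nil {
            panic(err)
        }
        // expected: "\u003cb\u003e\u2028\u003c/b\u003e"
        fmt.Println(string(out))
    }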
-// b := d.r.readn1() -// if jsonIsWS(b) { -// r := d.r -// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { -// } -// } -// d.tok = b -// } - -func (d *jsonDecDriver) uncacheRead() { - if d.tok != 0 { - d.r.unreadn1() - d.tok = 0 - } -} - -func (d *jsonDecDriver) sendContainerState(c containerState) { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - var xc uint8 // char expected - if c == containerMapKey { - if d.c != containerMapStart { - xc = ',' - } - } else if c == containerMapValue { - xc = ':' - } else if c == containerMapEnd { - xc = '}' - } else if c == containerArrayElem { - if d.c != containerArrayStart { - xc = ',' - } - } else if c == containerArrayEnd { - xc = ']' - } - if xc != 0 { - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = c -} - -func (d *jsonDecDriver) CheckBreak() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == '}' || d.tok == ']' { - // d.tok = 0 // only checking, not consuming - return true - } - return false -} - -func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { - bs := d.r.readx(int(toIdx - fromIdx)) - d.tok = 0 - if jsonValidateSymbols { - if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { - d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) - return - } - } -} - -func (d *jsonDecDriver) TryDecodeAsNil() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'n' { - d.readStrIdx(10, 13) // ull - return true - } - return false -} - -func (d *jsonDecDriver) DecodeBool() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'f' { - d.readStrIdx(5, 9) // alse - return false - } - if d.tok == 't' { - d.readStrIdx(1, 4) // rue - return true - } - d.d.errorf("json: decode bool: got first char %c", d.tok) - return false // "unreachable" -} - -func (d *jsonDecDriver) ReadMapStart() int { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '{' { - d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) - } - d.tok = 0 - d.c = containerMapStart - return -1 -} - -func (d *jsonDecDriver) ReadArrayStart() int { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '[' { - d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) - } - d.tok = 0 - d.c = containerArrayStart - return -1 -} - -func (d *jsonDecDriver) ContainerType() (vt valueType) { - // check container type by checking the first char - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if b := d.tok; b == '{' { - return valueTypeMap - } else if b == '[' { - return valueTypeArray - } else if b == 'n' { - return valueTypeNil - } else if b == '"' { - return valueTypeString - } - return valueTypeUnset - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // return false // "unreachable" -} - -func (d *jsonDecDriver) decNum(storeBytes bool) { - // If it is has a . or an e|E, decode as a float; else decode as an int. 
- if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - b := d.tok - var str bool - if b == '"' { - str = true - b = d.r.readn1() - } - if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { - d.d.errorf("json: decNum: got first char '%c'", b) - return - } - d.tok = 0 - - const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { - n.manOverflow = true - break - } - v := uint64(b - '0') - n.mantissa *= 10 - if v != 0 { - n1 := n.mantissa + v - if n1 < n.mantissa || n1 > jsonNumUintMaxVal { - n.manOverflow = true // n+v overflows - break - } - n.mantissa = n1 - } - case 6: - state = 7 - fallthrough - case 7: - if !(b == '0' && e == 0) { - e = e*10 + int16(b-'0') - } - default: - break LOOP - } - case '"': - if str { - if storeBytes { - d.bs = append(d.bs, '"') - } - b, eof = r.readn1eof() - } - break LOOP - default: - break LOOP - } - if storeBytes { - d.bs = append(d.bs, b) - } - b, eof = r.readn1eof() - } - - if jsonTruncateMantissa && n.mantissa != 0 { - for n.mantissa%10 == 0 { - n.mantissa /= 10 - n.exponent++ - } - } - - if e != 0 { - if eNeg { - n.exponent -= e - } else { - n.exponent += e - } - } - - // d.n = n - - if !eof { - if jsonUnreadAfterDecNum { - r.unreadn1() - } else { - if !jsonIsWS(b) { - d.tok = b - } - } - } - // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", - // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) - return -} - -func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { - d.decNum(false) - n := &d.n - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - var u uint64 - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - i = int64(u) - if n.neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeInt: %v\n", i) - return -} - -// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number -func (d *jsonDecDriver) floatVal() (f float64) { - f, useStrConv := d.n.floatVal() - if useStrConv { - var err error - if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { - panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) - } - if d.n.neg { - f = -f - } - } - return -} - -func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { - d.decNum(false) - n := &d.n - if n.neg { - d.d.errorf("json: unsigned integer cannot be negative") - return - } - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - if chkOvf.Uint(u, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeUint: %v\n", u) - return -} - -func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - d.decNum(true) - f = d.floatVal() - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("json: overflow float32: %v, %s", f, d.bs) - return - } - return -} - -func (d *jsonDecDriver) DecodeExt(rv 
interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if ext == nil { - re := rv.(*RawExt) - re.Tag = xtag - d.d.decode(&re.Value) - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - return -} - -func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. - if !isstring && d.se.i != nil { - bsOut = bs - d.DecodeExt(&bsOut, 0, &d.se) - return - } - d.appendStringAsBytes() - // if isstring, then just return the bytes, even if it is using the scratch buffer. - // the bytes will be converted to a string as needed. - if isstring { - return d.bs - } - // if appendStringAsBytes returned a zero-len slice, then treat as nil. - // This should only happen for null, and "". - if len(d.bs) == 0 { - return nil - } - bs0 := d.bs - slen := base64.StdEncoding.DecodedLen(len(bs0)) - if slen <= cap(bs) { - bsOut = bs[:slen] - } else if zerocopy && slen <= cap(d.b2) { - bsOut = d.b2[:slen] - } else { - bsOut = make([]byte, slen) - } - slen2, err := base64.StdEncoding.Decode(bsOut, bs0) - if err != nil { - d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) - return nil - } - if slen != slen2 { - bsOut = bsOut[:slen2] - } - return -} - -func (d *jsonDecDriver) DecodeString() (s string) { - d.appendStringAsBytes() - // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key - if d.c == containerMapKey { - return d.d.string(d.bs) - } - return string(d.bs) -} - -func (d *jsonDecDriver) appendStringAsBytes() { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - - // handle null as a string - if d.tok == 'n' { - d.readStrIdx(10, 13) // ull - d.bs = d.bs[:0] - return - } - - if d.tok != '"' { - d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) - } - d.tok = 0 - - v := d.bs[:0] - var c uint8 - r := d.r - for { - c = r.readn1() - if c == '"' { - break - } else if c == '\\' { - c = r.readn1() - switch c { - case '"', '\\', '/', '\'': - v = append(v, c) - case 'b': - v = append(v, '\b') - case 'f': - v = append(v, '\f') - case 'n': - v = append(v, '\n') - case 'r': - v = append(v, '\r') - case 't': - v = append(v, '\t') - case 'u': - rr := d.jsonU4(false) - // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) - if utf16.IsSurrogate(rr) { - rr = utf16.DecodeRune(rr, d.jsonU4(true)) - } - w2 := utf8.EncodeRune(d.bstr[:], rr) - v = append(v, d.bstr[:w2]...) - default: - d.d.errorf("json: unsupported escaped value: %c", c) - } - } else { - v = append(v, c) - } - } - d.bs = v -} - -func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { - r := d.r - if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { - d.d.errorf(`json: unquoteStr: invalid unicode sequence. 
Expecting \u`) - return 0 - } - // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) - var u uint32 - for i := 0; i < 4; i++ { - v := r.readn1() - if '0' <= v && v <= '9' { - v = v - '0' - } else if 'a' <= v && v <= 'z' { - v = v - 'a' + 10 - } else if 'A' <= v && v <= 'Z' { - v = v - 'A' + 10 - } else { - d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) - return 0 - } - u = u*16 + uint32(v) - } - return rune(u) -} - -func (d *jsonDecDriver) DecodeNaked() { - z := &d.d.n - // var decodeFurther bool - - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - switch d.tok { - case 'n': - d.readStrIdx(10, 13) // ull - z.v = valueTypeNil - case 'f': - d.readStrIdx(5, 9) // alse - z.v = valueTypeBool - z.b = false - case 't': - d.readStrIdx(1, 4) // rue - z.v = valueTypeBool - z.b = true - case '{': - z.v = valueTypeMap - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart - // decodeFurther = true - case '[': - z.v = valueTypeArray - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart - // decodeFurther = true - case '"': - z.v = valueTypeString - z.s = d.DecodeString() - default: // number - d.decNum(true) - n := &d.n - // if the string had a any of [.eE], then decode as float. - switch { - case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.exponent == 0: - u := n.mantissa - switch { - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u - } - default: - u, overflow := n.uintExp() - switch { - case overflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u - } - } - // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) - } - // if decodeFurther { - // d.s.sc.retryRead() - // } - return -} - -//---------------------- - -// JsonHandle is a handle for JSON encoding format. -// -// Json is comprehensively supported: -// - decodes numbers into interface{} as int, uint or float64 -// - configurable way to encode/decode []byte . -// by default, encodes and decodes []byte using base64 Std Encoding -// - UTF-8 support for encoding and decoding -// -// It has better performance than the json library in the standard library, -// by leveraging the performance improvements of the codec library and -// minimizing allocations. -// -// In addition, it doesn't read more bytes than necessary during a decode, which allows -// reading multiple values from a stream containing json and non-json content. -// For example, a user can read a json value, then a cbor value, then a msgpack value, -// all from the same stream in sequence. -type JsonHandle struct { - textEncodingType - BasicHandle - // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. - // If not configured, raw bytes are encoded to/from base64 text. - RawBytesExt InterfaceExt - - // Indent indicates how a value is encoded. - // - If positive, indent by that number of spaces. - // - If negative, indent by that number of tabs. - Indent int8 - - // IntegerAsString controls how integers (signed and unsigned) are encoded. - // - // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. 
- // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. - // This can be mitigated by configuring how to encode integers. - // - // IntegerAsString interpretes the following values: - // - if 'L', then encode integers > 2^53 as a json string. - // - if 'A', then encode all integers as a json string - // containing the exact integer representation as a decimal. - // - else encode all integers as a json number (default) - IntegerAsString uint8 -} - -func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - hd := jsonEncDriver{e: e, h: h} - hd.bs = hd.b[:0] - - hd.reset() - - return &hd -} - -func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { - // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, h: h} - hd.bs = hd.b[:0] - hd.reset() - return &hd -} - -func (e *jsonEncDriver) reset() { - e.w = e.e.w - e.se.i = e.h.RawBytesExt - if e.bs != nil { - e.bs = e.bs[:0] - } - e.d, e.dt, e.dl, e.ds = false, false, 0, "" - e.c = 0 - if e.h.Indent > 0 { - e.d = true - e.ds = jsonSpaces[:e.h.Indent] - } else if e.h.Indent < 0 { - e.d = true - e.dt = true - e.ds = jsonTabs[:-(e.h.Indent)] - } -} - -func (d *jsonDecDriver) reset() { - d.r = d.d.r - d.se.i = d.h.RawBytesExt - if d.bs != nil { - d.bs = d.bs[:0] - } - d.c, d.tok = 0, 0 - d.n.reset() -} - -var jsonEncodeTerminate = []byte{' '} - -func (h *JsonHandle) rpcEncodeTerminate() []byte { - return jsonEncodeTerminate -} - -var _ decDriver = (*jsonDecDriver)(nil) -var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go deleted file mode 100644 index e79830b56..000000000 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. -We need to maintain compatibility with it and how it encodes integer values -without caring about the type. 
- -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" - "reflect" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - w encWriter - h *MsgpackHandle - x [8]byte -} - -func (e *msgpackEncDriver) EncodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) EncodeInt(i int64) { - if i >= 0 { - e.EncodeUint(uint64(i)) - } else if i >= -32 { - e.w.writen1(byte(i)) - } else if i >= math.MinInt8 { - e.w.writen2(mpInt8, byte(i)) - } else if i >= math.MinInt16 { - e.w.writen1(mpInt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i >= math.MinInt32 { - e.w.writen1(mpInt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpInt64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeUint(i uint64) { - if i <= math.MaxInt8 { - e.w.writen1(byte(i)) - } else if i <= math.MaxUint8 { - e.w.writen2(mpUint8, byte(i)) - } else if i <= math.MaxUint16 { - e.w.writen1(mpUint16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i <= math.MaxUint32 { - e.w.writen1(mpUint32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpUint64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) EncodeFloat32(f float32) { - e.w.writen1(mpFloat) - 
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) EncodeFloat64(f float64) { - e.w.writen1(mpDouble) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(v) - if bs == nil { - e.EncodeNil() - return - } - if e.h.WriteExt { - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) - } else { - e.EncodeStringBytes(c_RAW, bs) - } -} - -func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - if l == 1 { - e.w.writen2(mpFixExt1, xtag) - } else if l == 2 { - e.w.writen2(mpFixExt2, xtag) - } else if l == 4 { - e.w.writen2(mpFixExt4, xtag) - } else if l == 8 { - e.w.writen2(mpFixExt8, xtag) - } else if l == 16 { - e.w.writen2(mpFixExt16, xtag) - } else if l < 256 { - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - } else if l < 65536 { - e.w.writen1(mpExt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - e.w.writen1(xtag) - } else { - e.w.writen1(mpExt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) EncodeArrayStart(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) EncodeMapStart(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) - } else { - e.writeContainerLen(msgpackContainerStr, len(s)) - } - if len(s) > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) - } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) - } - if len(bs) > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - if ct.hasFixMin && l < ct.fixCutoff { - e.w.writen1(ct.bFixMin | byte(l)) - } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { - e.w.writen2(ct.b8, uint8(l)) - } else if l < 65536 { - e.w.writen1(ct.b16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - } else { - e.w.writen1(ct.b32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - d *Decoder - r decReader // *Decoder decReader decReaderT - h *MsgpackHandle - b [scratchByteArrayLen]byte - bd byte - bdRead bool - br bool // bytes reader - noBuiltInTypes - noStreamingCodec - decNoSeparator -} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. 
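For illustration only (not part of the vendored file): a minimal sketch, assuming the package's public Decoder API, of how the DecodeNaked path described above is reached. Decoding msgpack bytes into a nil interface{} lets the driver inspect the stream and pick the Go type itself; the sample bytes and handle settings are hand-written assumptions.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	// 0x93 = fixarray(3), 0x01 = positive fixnum 1, 0xc3 = true, 0xa2 'h' 'i' = fixstr "hi"
	in := []byte{0x93, 0x01, 0xc3, 0xa2, 'h', 'i'}
	var v interface{} // decoding into a nil interface{} goes through DecodeNaked
	h := &codec.MsgpackHandle{RawToString: true}
	if err := codec.NewDecoderBytes(in, h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // e.g. []interface {}{1, true, "hi"}
}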
-func (d *msgpackDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - bd := d.bd - n := &d.d.n - var decodeFurther bool - - switch bd { - case mpNil: - n.v = valueTypeNil - d.bdRead = false - case mpFalse: - n.v = valueTypeBool - n.b = false - case mpTrue: - n.v = valueTypeBool - n.b = true - - case mpFloat: - n.v = valueTypeFloat - n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - case mpDouble: - n.v = valueTypeFloat - n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - - case mpUint8: - n.v = valueTypeUint - n.u = uint64(d.r.readn1()) - case mpUint16: - n.v = valueTypeUint - n.u = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - n.v = valueTypeUint - n.u = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - n.v = valueTypeUint - n.u = uint64(bigen.Uint64(d.r.readx(8))) - - case mpInt8: - n.v = valueTypeInt - n.i = int64(int8(d.r.readn1())) - case mpInt16: - n.v = valueTypeInt - n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - n.v = valueTypeInt - n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - n.v = valueTypeInt - n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - n.v = valueTypeString - n.s = d.DecodeString() - } else { - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - n.v = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - n.v = valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - n.v = valueTypeExt - clen := d.readExtLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(clen) - default: - d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(bigen.Uint16(d.r.readx(2)))) - case mpUint32: - i = int64(uint64(bigen.Uint32(d.r.readx(4)))) - case mpUint64: - i = int64(bigen.Uint64(d.r.readx(8))) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - i = int64(bigen.Uint64(d.r.readx(8))) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> 
(64 - bitsize); i != trunc { - d.d.errorf("Overflow int value: %v", i) - return - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - ui = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - ui = bigen.Uint64(d.r.readx(8)) - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt16: - if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt32: - if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt64: - if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - return - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - d.d.errorf("Overflow uint value: %v", ui) - return - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFloat { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == mpDouble { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - f = float64(d.DecodeInt(0)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFalse || d.bd == 0 { - // b = false - } else if d.bd == mpTrue || d.bd == 1 { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - var clen int - // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin - if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) - } else { - clen = d.readContainerLen(msgpackContainerStr) - } - // println("DecodeBytes: clen: ", clen) - d.bdRead = false - // bytes may be nil, so handle it. if nil, clen=-1. 
- if clen < 0 { - return nil - } - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *msgpackDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *msgpackDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *msgpackDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *msgpackDecDriver) ContainerType() (vt valueType) { - bd := d.bd - if bd == mpNil { - return valueTypeNil - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || - (!d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { - return valueTypeBytes - } else if d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { - return valueTypeString - } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - return valueTypeArray - } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpNil { - d.bdRead = false - v = true - } - return -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - if bd == mpNil { - clen = -1 // to represent nil - } else if bd == ct.b8 { - clen = int(d.r.readn1()) - } else if bd == ct.b16 { - clen = int(bigen.Uint16(d.r.readx(2))) - } else if bd == ct.b32 { - clen = int(bigen.Uint32(d.r.readx(4))) - } else if (ct.bFixMin & bd) == ct.bFixMin { - clen = int(ct.bFixMin ^ bd) - } else { - d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) ReadMapStart() int { - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) ReadArrayStart() int { - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(bigen.Uint16(d.r.readx(2))) - case mpExt32: - clen = int(bigen.Uint32(d.r.readx(4))) - default: - d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) - return - } - return -} - -func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - xbd := d.bd - if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil, false, true) - } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || - (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = 
d.DecodeBytes(nil, true, true) - } else { - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool - binaryEncodingType -} - -func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { - return &msgpackEncDriver{e: e, w: e.w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { - return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *msgpackEncDriver) reset() { - e.w = e.e.w -} - -func (d *msgpackDecDriver) reset() { - d.r = d.d.r - d.bd, d.bdRead = 0, false -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.isClosed() { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. 
- - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. - // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go deleted file mode 100644 index cfee3d084..000000000 --- a/vendor/github.com/ugorji/go/codec/noop.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math/rand" - "time" -) - -// NoopHandle returns a no-op handle. It basically does nothing. -// It is only useful for benchmarking, as it gives an idea of the -// overhead from the codec framework. -// -// LIBRARY USERS: *** DO NOT USE *** -func NoopHandle(slen int) *noopHandle { - h := noopHandle{} - h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) - h.B = make([][]byte, slen) - h.S = make([]string, slen) - for i := 0; i < len(h.S); i++ { - b := make([]byte, i+1) - for j := 0; j < len(b); j++ { - b[j] = 'a' + byte(i) - } - h.B[i] = b - h.S[i] = string(b) - } - return &h -} - -// noopHandle does nothing. -// It is used to simulate the overhead of the codec framework. -type noopHandle struct { - BasicHandle - binaryEncodingType - noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. -} - -type noopDrv struct { - d *Decoder - e *Encoder - i int - S []string - B [][]byte - mks []bool // stack. if map (true), else if array (false) - mk bool // top of stack. what container are we on? map or array? - ct valueType // last response for IsContainerType. 
- cb int // counter for ContainerType - rand *rand.Rand -} - -func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } -func (h *noopDrv) m(v int) int { h.i++; return h.i % v } - -func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } -func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } - -func (h *noopDrv) reset() {} -func (h *noopDrv) uncacheRead() {} - -// --- encDriver - -// stack functions (for map and array) -func (h *noopDrv) start(b bool) { - // println("start", len(h.mks)+1) - h.mks = append(h.mks, b) - h.mk = b -} -func (h *noopDrv) end() { - // println("end: ", len(h.mks)-1) - h.mks = h.mks[:len(h.mks)-1] - if len(h.mks) > 0 { - h.mk = h.mks[len(h.mks)-1] - } else { - h.mk = false - } -} - -func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) EncodeNil() {} -func (h *noopDrv) EncodeInt(i int64) {} -func (h *noopDrv) EncodeUint(i uint64) {} -func (h *noopDrv) EncodeBool(b bool) {} -func (h *noopDrv) EncodeFloat32(f float32) {} -func (h *noopDrv) EncodeFloat64(f float64) {} -func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} -func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } -func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } -func (h *noopDrv) EncodeEnd() { h.end() } - -func (h *noopDrv) EncodeString(c charEncoding, v string) {} -func (h *noopDrv) EncodeSymbol(v string) {} -func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} - -func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} - -// ---- decDriver -func (h *noopDrv) initReadNext() {} -func (h *noopDrv) CheckBreak() bool { return false } -func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } -func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } -func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } -func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } -func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } -func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } - -// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } - -func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } - -func (h *noopDrv) ReadEnd() { h.end() } - -// toggle map/slice -func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } -func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } - -func (h *noopDrv) ContainerType() (vt valueType) { - // return h.m(2) == 0 - // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. - // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 - // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) - // however, every 10th time it is called, we just return something else. 
- var vals = [...]valueType{valueTypeArray, valueTypeMap} - // ------------ TAKE ------------ - // if h.cb%2 == 0 { - // if h.ct == valueTypeMap || h.ct == valueTypeArray { - // } else { - // h.ct = vals[h.m(2)] - // } - // } else if h.cb%5 == 0 { - // h.ct = valueType(h.m(8)) - // } else { - // h.ct = vals[h.m(2)] - // } - // ------------ TAKE ------------ - // if h.cb%16 == 0 { - // h.ct = valueType(h.cb % 8) - // } else { - // h.ct = vals[h.cb%2] - // } - h.ct = vals[h.cb%2] - h.cb++ - return h.ct - - // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { - // return h.ct - // } - // return valueTypeUnset - // TODO: may need to tweak this so it works. - // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { - // h.cb = !h.cb - // h.ct = vt - // return h.cb - // } - // // go in a loop and check it. - // h.ct = vt - // h.cb = h.m(7) == 0 - // return h.cb -} -func (h *noopDrv) TryDecodeAsNil() bool { - if h.mk { - return false - } else { - return h.m(8) == 0 - } -} -func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { - return 0 -} - -func (h *noopDrv) DecodeNaked() { - // use h.r (random) not h.m() because h.m() could cause the same value to be given. - var sk int - if h.mk { - // if mapkey, do not support values of nil OR bytes, array, map or rawext - sk = h.r(7) + 1 - } else { - sk = h.r(12) - } - n := &h.d.n - switch sk { - case 0: - n.v = valueTypeNil - case 1: - n.v, n.b = valueTypeBool, false - case 2: - n.v, n.b = valueTypeBool, true - case 3: - n.v, n.i = valueTypeInt, h.DecodeInt(64) - case 4: - n.v, n.u = valueTypeUint, h.DecodeUint(64) - case 5: - n.v, n.f = valueTypeFloat, h.DecodeFloat(true) - case 6: - n.v, n.f = valueTypeFloat, h.DecodeFloat(false) - case 7: - n.v, n.s = valueTypeString, h.DecodeString() - case 8: - n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] - case 9: - n.v = valueTypeArray - case 10: - n.v = valueTypeMap - default: - n.v = valueTypeExt - n.u = h.DecodeUint(64) - n.l = h.B[h.m(len(h.B))] - } - h.ct = n.v - return -} diff --git a/vendor/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go deleted file mode 100644 index 2353263e8..000000000 --- a/vendor/github.com/ugorji/go/codec/prebuild.go +++ /dev/null @@ -1,3 +0,0 @@ -package codec - -//go:generate bash prebuild.sh diff --git a/vendor/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh deleted file mode 100644 index 909f4bb0f..000000000 --- a/vendor/github.com/ugorji/go/codec/prebuild.sh +++ /dev/null @@ -1,199 +0,0 @@ -#!/bin/bash - -# _needgen is a helper function to tell if we need to generate files for msgp, codecgen. -_needgen() { - local a="$1" - zneedgen=0 - if [[ ! -e "$a" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - done - echo 0 -} - -# _build generates fast-path.go and gen-helper.go. -# -# It is needed because there is some dependency between the generated code -# and the other classes. Consequently, we have to totally remove the -# generated files and put stubs in place, before calling "go run" again -# to recreate them. -_build() { - if ! 
[[ "${zforce}" == "1" || - "1" == $( _needgen "fast-path.generated.go" ) || - "1" == $( _needgen "gen-helper.generated.go" ) || - "1" == $( _needgen "gen.generated.go" ) || - 1 == 0 ]] - then - return 0 - fi - - # echo "Running prebuild" - if [ "${zbak}" == "1" ] - then - # echo "Backing up old generated files" - _zts=`date '+%m%d%Y_%H%M%S'` - _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak - # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak - # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak - else - rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - fi - - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < math.MaxInt64 { - // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) - // return - // } - return -} - -func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("simple: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == simpleVdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - return - } - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). 
-func (d *simpleDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdTrue { - b = true - } else if d.bd == simpleVdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) ReadMapStart() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) ReadArrayStart() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(bigen.Uint16(d.r.readx(2))) - case 3: - ui := uint64(bigen.Uint32(d.r.readx(4))) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - case 4: - ui := bigen.Uint64(d.r.readx(8)) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - } - d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs = d.DecodeBytes(nil, false, true) - default: - d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.bd { - case simpleVdNil: - n.v = valueTypeNil - case simpleVdFalse: - n.v = valueTypeBool - n.b = false - case simpleVdTrue: - n.v = valueTypeBool - n.b = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case simpleVdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case simpleVdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - n.v = valueTypeString - n.s = d.DecodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - n.v = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. -// -// simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon. 
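As an aside, a minimal sketch (assuming the package's public Encoder/Decoder API; the struct and values are invented for illustration) of round-tripping a value through the descriptor-byte format described above:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	type point struct {
		X, Y int
	}
	var (
		h   codec.SimpleHandle // every encoded value is preceded by a descriptor byte (bd)
		buf []byte
	)
	if err := codec.NewEncoderBytes(&buf, &h).Encode(point{X: 3, Y: 4}); err != nil {
		panic(err)
	}
	var p point
	if err := codec.NewDecoderBytes(buf, &h).Decode(&p); err != nil {
		panic(err)
	}
	fmt.Printf("% x -> %+v\n", buf, p)
}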
-type SimpleHandle struct { - BasicHandle - binaryEncodingType -} - -func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { - return &simpleEncDriver{e: e, w: e.w, h: h} -} - -func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { - return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *simpleEncDriver) reset() { - e.w = e.e.w -} - -func (d *simpleDecDriver) reset() { - d.r = d.d.r - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json deleted file mode 100644 index 902858671..000000000 --- a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json +++ /dev/null @@ -1,639 +0,0 @@ -[ - { - "cbor": "AA==", - "hex": "00", - "roundtrip": true, - "decoded": 0 - }, - { - "cbor": "AQ==", - "hex": "01", - "roundtrip": true, - "decoded": 1 - }, - { - "cbor": "Cg==", - "hex": "0a", - "roundtrip": true, - "decoded": 10 - }, - { - "cbor": "Fw==", - "hex": "17", - "roundtrip": true, - "decoded": 23 - }, - { - "cbor": "GBg=", - "hex": "1818", - "roundtrip": true, - "decoded": 24 - }, - { - "cbor": "GBk=", - "hex": "1819", - "roundtrip": true, - "decoded": 25 - }, - { - "cbor": "GGQ=", - "hex": "1864", - "roundtrip": true, - "decoded": 100 - }, - { - "cbor": "GQPo", - "hex": "1903e8", - "roundtrip": true, - "decoded": 1000 - }, - { - "cbor": "GgAPQkA=", - "hex": "1a000f4240", - "roundtrip": true, - "decoded": 1000000 - }, - { - "cbor": "GwAAAOjUpRAA", - "hex": "1b000000e8d4a51000", - "roundtrip": true, - "decoded": 1000000000000 - }, - { - "cbor": "G///////////", - "hex": "1bffffffffffffffff", - "roundtrip": true, - "decoded": 18446744073709551615 - }, - { - "cbor": "wkkBAAAAAAAAAAA=", - "hex": "c249010000000000000000", - "roundtrip": true, - "decoded": 18446744073709551616 - }, - { - "cbor": "O///////////", - "hex": "3bffffffffffffffff", - "roundtrip": true, - "decoded": -18446744073709551616, - "skip": true - }, - { - "cbor": "w0kBAAAAAAAAAAA=", - "hex": "c349010000000000000000", - "roundtrip": true, - "decoded": -18446744073709551617 - }, - { - "cbor": "IA==", - "hex": "20", - "roundtrip": true, - "decoded": -1 - }, - { - "cbor": "KQ==", - "hex": "29", - "roundtrip": true, - "decoded": -10 - }, - { - "cbor": "OGM=", - "hex": "3863", - "roundtrip": true, - "decoded": -100 - }, - { - "cbor": "OQPn", - "hex": "3903e7", - "roundtrip": true, - "decoded": -1000 - }, - { - "cbor": "+QAA", - "hex": "f90000", - "roundtrip": true, - "decoded": 0.0 - }, - { - "cbor": "+YAA", - "hex": "f98000", - "roundtrip": true, - "decoded": -0.0 - }, - { - "cbor": "+TwA", - "hex": "f93c00", - "roundtrip": true, - "decoded": 1.0 - }, - { - "cbor": "+z/xmZmZmZma", - "hex": "fb3ff199999999999a", - "roundtrip": true, - "decoded": 1.1 - }, - { - "cbor": "+T4A", - "hex": "f93e00", - "roundtrip": true, - "decoded": 1.5 - }, - { - "cbor": "+Xv/", - "hex": "f97bff", - "roundtrip": true, - "decoded": 65504.0 - }, - { - "cbor": "+kfDUAA=", - "hex": "fa47c35000", - "roundtrip": true, - "decoded": 100000.0 - }, - { - "cbor": "+n9///8=", - "hex": "fa7f7fffff", - "roundtrip": true, - "decoded": 3.4028234663852886e+38 - }, - { - "cbor": "+3435DyIAHWc", - "hex": "fb7e37e43c8800759c", - "roundtrip": true, - "decoded": 1.0e+300 - }, - { - "cbor": "+QAB", - "hex": "f90001", - 
"roundtrip": true, - "decoded": 5.960464477539063e-08 - }, - { - "cbor": "+QQA", - "hex": "f90400", - "roundtrip": true, - "decoded": 6.103515625e-05 - }, - { - "cbor": "+cQA", - "hex": "f9c400", - "roundtrip": true, - "decoded": -4.0 - }, - { - "cbor": "+8AQZmZmZmZm", - "hex": "fbc010666666666666", - "roundtrip": true, - "decoded": -4.1 - }, - { - "cbor": "+XwA", - "hex": "f97c00", - "roundtrip": true, - "diagnostic": "Infinity" - }, - { - "cbor": "+X4A", - "hex": "f97e00", - "roundtrip": true, - "diagnostic": "NaN" - }, - { - "cbor": "+fwA", - "hex": "f9fc00", - "roundtrip": true, - "diagnostic": "-Infinity" - }, - { - "cbor": "+n+AAAA=", - "hex": "fa7f800000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+n/AAAA=", - "hex": "fa7fc00000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+v+AAAA=", - "hex": "faff800000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "+3/wAAAAAAAA", - "hex": "fb7ff0000000000000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+3/4AAAAAAAA", - "hex": "fb7ff8000000000000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+//wAAAAAAAA", - "hex": "fbfff0000000000000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "9A==", - "hex": "f4", - "roundtrip": true, - "decoded": false - }, - { - "cbor": "9Q==", - "hex": "f5", - "roundtrip": true, - "decoded": true - }, - { - "cbor": "9g==", - "hex": "f6", - "roundtrip": true, - "decoded": null - }, - { - "cbor": "9w==", - "hex": "f7", - "roundtrip": true, - "diagnostic": "undefined" - }, - { - "cbor": "8A==", - "hex": "f0", - "roundtrip": true, - "diagnostic": "simple(16)" - }, - { - "cbor": "+Bg=", - "hex": "f818", - "roundtrip": true, - "diagnostic": "simple(24)" - }, - { - "cbor": "+P8=", - "hex": "f8ff", - "roundtrip": true, - "diagnostic": "simple(255)" - }, - { - "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", - "hex": "c074323031332d30332d32315432303a30343a30305a", - "roundtrip": true, - "diagnostic": "0(\"2013-03-21T20:04:00Z\")" - }, - { - "cbor": "wRpRS2ew", - "hex": "c11a514b67b0", - "roundtrip": true, - "diagnostic": "1(1363896240)" - }, - { - "cbor": "wftB1FLZ7CAAAA==", - "hex": "c1fb41d452d9ec200000", - "roundtrip": true, - "diagnostic": "1(1363896240.5)" - }, - { - "cbor": "10QBAgME", - "hex": "d74401020304", - "roundtrip": true, - "diagnostic": "23(h'01020304')" - }, - { - "cbor": "2BhFZElFVEY=", - "hex": "d818456449455446", - "roundtrip": true, - "diagnostic": "24(h'6449455446')" - }, - { - "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", - "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", - "roundtrip": true, - "diagnostic": "32(\"http://www.example.com\")" - }, - { - "cbor": "QA==", - "hex": "40", - "roundtrip": true, - "diagnostic": "h''" - }, - { - "cbor": "RAECAwQ=", - "hex": "4401020304", - "roundtrip": true, - "diagnostic": "h'01020304'" - }, - { - "cbor": "YA==", - "hex": "60", - "roundtrip": true, - "decoded": "" - }, - { - "cbor": "YWE=", - "hex": "6161", - "roundtrip": true, - "decoded": "a" - }, - { - "cbor": "ZElFVEY=", - "hex": "6449455446", - "roundtrip": true, - "decoded": "IETF" - }, - { - "cbor": "YiJc", - "hex": "62225c", - "roundtrip": true, - "decoded": "\"\\" - }, - { - "cbor": "YsO8", - "hex": "62c3bc", - "roundtrip": true, - "decoded": "ü" - }, - { - "cbor": "Y+awtA==", - "hex": "63e6b0b4", - "roundtrip": true, - "decoded": "水" - }, - { - "cbor": "ZPCQhZE=", - "hex": "64f0908591", - "roundtrip": true, - "decoded": "𐅑" - }, - { - "cbor": "gA==", - "hex": 
"80", - "roundtrip": true, - "decoded": [ - - ] - }, - { - "cbor": "gwECAw==", - "hex": "83010203", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3 - ] - }, - { - "cbor": "gwGCAgOCBAU=", - "hex": "8301820203820405", - "roundtrip": true, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", - "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "oA==", - "hex": "a0", - "roundtrip": true, - "decoded": { - } - }, - { - "cbor": "ogECAwQ=", - "hex": "a201020304", - "roundtrip": true, - "skip": true, - "diagnostic": "{1: 2, 3: 4}" - }, - { - "cbor": "omFhAWFiggID", - "hex": "a26161016162820203", - "roundtrip": true, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhoWFiYWM=", - "hex": "826161a161626163", - "roundtrip": true, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", - "hex": "a56161614161626142616361436164614461656145", - "roundtrip": true, - "decoded": { - "a": "A", - "b": "B", - "c": "C", - "d": "D", - "e": "E" - } - }, - { - "cbor": "X0IBAkMDBAX/", - "hex": "5f42010243030405ff", - "roundtrip": false, - "skip": true, - "diagnostic": "(_ h'0102', h'030405')" - }, - { - "cbor": "f2VzdHJlYWRtaW5n/w==", - "hex": "7f657374726561646d696e67ff", - "roundtrip": false, - "decoded": "streaming" - }, - { - "cbor": "n/8=", - "hex": "9fff", - "roundtrip": false, - "decoded": [ - - ] - }, - { - "cbor": "nwGCAgOfBAX//w==", - "hex": "9f018202039f0405ffff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwGCAgOCBAX/", - "hex": "9f01820203820405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGCAgOfBAX/", - "hex": "83018202039f0405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGfAgP/ggQF", - "hex": "83019f0203ff820405", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", - "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", - "roundtrip": false, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "v2FhAWFinwID//8=", - "hex": "bf61610161629f0203ffff", - "roundtrip": false, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhv2FiYWP/", - "hex": "826161bf61626163ff", - "roundtrip": false, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "v2NGdW71Y0FtdCH/", - "hex": "bf6346756ef563416d7421ff", - "roundtrip": false, - "decoded": { - "Fun": true, - "Amt": -2 - } - } -] diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py deleted file mode 100644 index c0ad20b34..000000000 --- a/vendor/github.com/ugorji/go/codec/test.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python - -# This will create golden files in a directory passed to it. -# A Test calls this internally to create the golden files -# So it can process them (so we don't have to checkin the files). 
- -# Ensure msgpack-python and cbor are installed first, using: -# sudo apt-get install python-dev -# sudo apt-get install python-pip -# pip install --user msgpack-python msgpack-rpc-python cbor - -# Ensure all "string" keys are utf strings (else encoded as bytes) - -import cbor, msgpack, msgpackrpc, sys, os, threading - -def get_test_data_list(): - # get list with all primitive types, and a combo type - l0 = [ - -8, - -1616, - -32323232, - -6464646464646464, - 192, - 1616, - 32323232, - 6464646464646464, - 192, - -3232.0, - -6464646464.0, - 3232.0, - 6464.0, - 6464646464.0, - False, - True, - u"null", - None, - u"someday", - 1328176922000002000, - u"", - -2206187877999998000, - u"bytestring", - 270, - u"none", - -2013855847999995777, - #-6795364578871345152, - ] - l1 = [ - { "true": True, - "false": False }, - { "true": u"True", - "false": False, - "uint16(1616)": 1616 }, - { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], - "int32":32323232, "bool": True, - "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": u"1234567890" }, - { True: "true", 138: False, "false": 200 } - ] - - l = [] - l.extend(l0) - l.append(l0) - l.append(1) - l.extend(l1) - return l - -def build_test_data(destdir): - l = get_test_data_list() - for i in range(len(l)): - # packer = msgpack.Packer() - serialized = msgpack.dumps(l[i]) - f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') - f.write(serialized) - f.close() - serialized = cbor.dumps(l[i]) - f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') - f.write(serialized) - f.close() - -def doRpcServer(port, stopTimeSec): - class EchoHandler(object): - def Echo123(self, msg1, msg2, msg3): - return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) - def EchoStruct(self, msg): - return ("%s" % msg) - - addr = msgpackrpc.Address('localhost', port) - server = msgpackrpc.Server(EchoHandler()) - server.listen(addr) - # run thread to stop it after stopTimeSec seconds if > 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/vendor/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/ugorji/go/codec/tests.sh deleted file mode 100644 index 342f336df..000000000 --- a/vendor/github.com/ugorji/go/codec/tests.sh +++ /dev/null @@ -1,102 +0,0 @@ 
-#!/bin/bash - -# Run all the different permutations of all the tests. -# This helps ensure that nothing gets broken. - -_run() { - # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), - # binc-nosymbols (n), struct2array (s), intern string (e), - # json-indent (d), circular (l) - # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) - # 3. OPTIONS: verbose (v), reset (z), must (m), - # - # Use combinations of mode to get exactly what you want, - # and then pass the variations you need. - - ztags="" - zargs="" - local OPTIND - OPTIND=1 - while getopts "_xurtcinsvgzmefdl" flag - do - case "x$flag" in - 'xr') ;; - 'xf') ztags="$ztags notfastpath" ;; - 'xg') ztags="$ztags codecgen" ;; - 'xx') ztags="$ztags x" ;; - 'xu') ztags="$ztags unsafe" ;; - 'xv') zargs="$zargs -tv" ;; - 'xz') zargs="$zargs -tr" ;; - 'xm') zargs="$zargs -tm" ;; - 'xl') zargs="$zargs -tl" ;; - *) ;; - esac - done - # shift $((OPTIND-1)) - printf '............. TAGS: %s .............\n' "$ztags" - # echo ">>>>>>> TAGS: $ztags" - - OPTIND=1 - while getopts "_xurtcinsvgzmefdl" flag - do - case "x$flag" in - 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; - 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; - 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; - 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; - 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; - 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; - 'xd') printf ">>>>>>> INDENT : "; - go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; - go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; - sleep 2 ;; - *) ;; - esac - done - shift $((OPTIND-1)) - - OPTIND=1 -} - -# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then - # All: r, x, g, gu - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset - _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) - _run "-x_tcinsed_ml" # external - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe -elif [[ "x$@" = "x-Z" ]]; then - # Regular - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset -elif [[ "x$@" = "x-F" ]]; then - # regular with notfastpath - _run "-_tcinsed_ml_f" # regular - _run "-_tcinsed_ml_zf" # regular with reset -elif [[ "x$@" = "x-C" ]]; then - # codecgen - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe -elif [[ "x$@" = "x-X" ]]; then - # external - _run "-x_tcinsed_ml" # external -elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then - cat <= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. 
-func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore new file mode 100644 index 000000000..9f11b755a --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/.gitignore @@ -0,0 +1 @@ +.idea/ diff --git a/vendor/github.com/vishvananda/netlink/.travis.yml b/vendor/github.com/vishvananda/netlink/.travis.yml index 4cef4f36a..80219c69d 100644 --- a/vendor/github.com/vishvananda/netlink/.travis.yml +++ b/vendor/github.com/vishvananda/netlink/.travis.yml @@ -1,7 +1,8 @@ language: go go: - - "1.10.x" - - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" before_script: # make sure we keep path in tact when we sudo - sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers @@ -13,5 +14,7 @@ before_script: - sudo modprobe nf_conntrack_ipv4 - sudo modprobe nf_conntrack_ipv6 - sudo modprobe sch_hfsc + - sudo modprobe sch_sfq install: - - go get github.com/vishvananda/netns + - go get -v -t ./... 
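The decodeTime helper removed above rebuilt the zone from a raw minute offset rather than a zone name. A small standard-library sketch of that behaviour, with illustrative values:

package main

import (
    "fmt"
    "time"
)

func main() {
    // decodeTime used an anonymous fixed zone so that reflect.DeepEqual on the
    // resulting time.Time is not affected by a zone name, only by the offset.
    offsetMinutes := -480 // UTC-08:00, as carried in the deleted wire format
    t := time.Unix(1328176922, 2000).In(time.FixedZone("", offsetMinutes*60))
    fmt.Println(t, t.UTC())
}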
+go_import_path: github.com/vishvananda/netlink diff --git a/vendor/github.com/vishvananda/netlink/addr.go b/vendor/github.com/vishvananda/netlink/addr.go index f08c95696..653f540db 100644 --- a/vendor/github.com/vishvananda/netlink/addr.go +++ b/vendor/github.com/vishvananda/netlink/addr.go @@ -17,6 +17,7 @@ type Addr struct { Broadcast net.IP PreferedLft int ValidLft int + LinkIndex int } // String returns $ip/$netmask $label diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 797504d0b..71da251ca 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -11,43 +11,63 @@ import ( "golang.org/x/sys/unix" ) -// IFA_FLAGS is a u32 attribute. -const IFA_FLAGS = 0x8 - // AddrAdd will add an IP address to a link device. +// // Equivalent to: `ip addr add $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func AddrAdd(link Link, addr *Addr) error { return pkgHandle.AddrAdd(link, addr) } // AddrAdd will add an IP address to a link device. +// // Equivalent to: `ip addr add $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func (h *Handle) AddrAdd(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } // AddrReplace will replace (or, if not present, add) an IP address on a link device. +// // Equivalent to: `ip addr replace $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func AddrReplace(link Link, addr *Addr) error { return pkgHandle.AddrReplace(link, addr) } // AddrReplace will replace (or, if not present, add) an IP address on a link device. +// // Equivalent to: `ip addr replace $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func (h *Handle) AddrReplace(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) return h.addrHandle(link, addr, req) } // AddrDel will delete an IP address from a link device. +// // Equivalent to: `ip addr del $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. func AddrDel(link Link, addr *Addr) error { return pkgHandle.AddrDel(link, addr) } // AddrDel will delete an IP address from a link device. // Equivalent to: `ip addr del $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. 
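A short usage sketch of the broadcast behaviour documented above, using the package's existing LinkByName and ParseAddr helpers; the interface name and address are illustrative:

package main

import "github.com/vishvananda/netlink"

func main() {
    link, err := netlink.LinkByName("eth0") // hypothetical interface
    if err != nil {
        panic(err)
    }
    addr, err := netlink.ParseAddr("192.0.2.10/24")
    if err != nil {
        panic(err)
    }
    // Broadcast is left nil: for this /24 it is computed as 192.0.2.255 before
    // the request is sent, while a /31 or /32 would get no broadcast attribute.
    if err := netlink.AddrAdd(link, addr); err != nil {
        panic(err)
    }
}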
func (h *Handle) AddrDel(link Link, addr *Addr) error { req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) return h.addrHandle(link, addr, req) @@ -102,20 +122,26 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } else { b := make([]byte, 4) native.PutUint32(b, uint32(addr.Flags)) - flagsData := nl.NewRtAttr(IFA_FLAGS, b) + flagsData := nl.NewRtAttr(unix.IFA_FLAGS, b) req.AddData(flagsData) } } if family == FAMILY_V4 { - if addr.Broadcast == nil { + // Automatically set the broadcast address if it is unset and the + // subnet is large enough to sensibly have one (/30 or larger). + // See: RFC 3021 + if addr.Broadcast == nil && prefixlen < 31 { calcBroadcast := make(net.IP, masklen/8) for i := range localAddrData { calcBroadcast[i] = localAddrData[i] | ^mask[i] } addr.Broadcast = calcBroadcast } - req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + + if addr.Broadcast != nil { + req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + } if addr.Label != "" { labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) @@ -127,10 +153,10 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error // value should be "forever". To compensate for that, only add the attributes if at least one of the values is // non-zero, which means the caller has explicitly set them if addr.ValidLft > 0 || addr.PreferedLft > 0 { - cachedata := nl.IfaCacheInfo{ - IfaValid: uint32(addr.ValidLft), - IfaPrefered: uint32(addr.PreferedLft), - } + cachedata := nl.IfaCacheInfo{unix.IfaCacheinfo{ + Valid: uint32(addr.ValidLft), + Prefered: uint32(addr.PreferedLft), + }} req.AddData(nl.NewRtAttr(unix.IFA_CACHEINFO, cachedata.Serialize())) } @@ -167,12 +193,12 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { var res []Addr for _, m := range msgs { - addr, msgFamily, ifindex, err := parseAddr(m) + addr, msgFamily, err := parseAddr(m) if err != nil { return res, err } - if link != nil && ifindex != indexFilter { + if link != nil && addr.LinkIndex != indexFilter { // Ignore messages from other interfaces continue } @@ -187,11 +213,11 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { return res, nil } -func parseAddr(m []byte) (addr Addr, family, index int, err error) { +func parseAddr(m []byte) (addr Addr, family int, err error) { msg := nl.DeserializeIfAddrmsg(m) family = -1 - index = -1 + addr.LinkIndex = -1 attrs, err1 := nl.ParseRouteAttr(m[msg.Len():]) if err1 != nil { @@ -200,7 +226,7 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { } family = int(msg.Family) - index = int(msg.Index) + addr.LinkIndex = int(msg.Index) var local, dst *net.IPNet for _, attr := range attrs { @@ -225,12 +251,12 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { addr.Broadcast = attr.Value case unix.IFA_LABEL: addr.Label = string(attr.Value[:len(attr.Value)-1]) - case IFA_FLAGS: + case unix.IFA_FLAGS: addr.Flags = int(native.Uint32(attr.Value[0:4])) - case nl.IFA_CACHEINFO: + case unix.IFA_CACHEINFO: ci := nl.DeserializeIfaCacheInfo(attr.Value) - addr.PreferedLft = int(ci.IfaPrefered) - addr.ValidLft = int(ci.IfaValid) + addr.PreferedLft = int(ci.Prefered) + addr.ValidLft = int(ci.Valid) } } @@ -270,21 +296,22 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. 
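A minimal subscription sketch using the AddrSubscribe entry point defined just below; with the change above, the LinkIndex in each update comes straight from the parsed Addr:

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
)

func main() {
    ch := make(chan netlink.AddrUpdate)
    done := make(chan struct{})
    defer close(done)

    if err := netlink.AddrSubscribe(ch, done); err != nil {
        panic(err)
    }
    for update := range ch {
        fmt.Printf("link %d: %s new=%v\n",
            update.LinkIndex, update.LinkAddress.String(), update.NewAddr)
    }
}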
func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(ns, netns.None(), ch, done, nil, false) + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0) } // AddrSubscribeOptions contains a set of options to use with // AddrSubscribeWithOptions. type AddrSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int } // AddrSubscribeWithOptions work like AddrSubscribe but enable to @@ -295,10 +322,10 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize) } -func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err @@ -309,6 +336,12 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c s.Close() }() } + if rcvbuf != 0 { + err = pkgHandle.SetSocketReceiveBufferSize(rcvbuf, false) + if err != nil { + return err + } + } if listExisting { req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) @@ -321,13 +354,19 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { continue @@ -339,28 +378,29 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) } - return + continue } msgType := m.Header.Type if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { if cberr != nil { cberr(fmt.Errorf("bad message type: %d", msgType)) } - return + continue } - addr, _, ifindex, err := parseAddr(m.Data) + addr, _, err := parseAddr(m.Data) if err != nil { if cberr != nil { cberr(fmt.Errorf("could not parse address: %v", err)) } - return + continue } ch <- AddrUpdate{LinkAddress: *addr.IPNet, - LinkIndex: ifindex, + LinkIndex: addr.LinkIndex, NewAddr: msgType == unix.RTM_NEWADDR, Flags: addr.Flags, Scope: addr.Scope, diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index fd4c99147..6e1224c47 100644 
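The new ReceiveBufferSize option and the continue-on-error behaviour above can be exercised as follows; the buffer size is illustrative:

package main

import (
    "log"

    "github.com/vishvananda/netlink"
)

func main() {
    ch := make(chan netlink.AddrUpdate)
    done := make(chan struct{})
    defer close(done)

    opts := netlink.AddrSubscribeOptions{
        ListExisting:      true,
        ReceiveBufferSize: 1024 * 1024,
        ErrorCallback: func(err error) {
            // Parse errors and unexpected senders are reported here; the
            // subscription itself keeps running instead of terminating.
            log.Println("addr subscribe:", err)
        },
    }
    if err := netlink.AddrSubscribeWithOptions(ch, done, opts); err != nil {
        panic(err)
    }
    for update := range ch {
        log.Printf("addr update: %+v", update)
    }
}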
--- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -108,8 +108,5 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) - if err != nil { - return err - } - return nil + return err } diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go index bd9bbb9ff..10ceffed8 100644 --- a/vendor/github.com/vishvananda/netlink/class.go +++ b/vendor/github.com/vishvananda/netlink/class.go @@ -41,7 +41,7 @@ type GnetStatsQueue struct { Overlimits uint32 // number of enqueues over the limit } -// ClassStatistics representaion based on generic networking statisticsfor netlink. +// ClassStatistics representation based on generic networking statistics for netlink. // See Documentation/networking/gen_stats.txt in Linux source code for more details. type ClassStatistics struct { Basic *GnetStatsBasic @@ -127,12 +127,15 @@ func (class *GenericClass) Attrs() *ClassAttrs { return &class.ClassAttrs } -// Type retrun the class type +// Type return the class type func (class *GenericClass) Type() string { return class.ClassType } -// ServiceCurve is the way the HFSC curve are represented +// ServiceCurve is a nondecreasing function of some time unit, returning the amount of service +// (an allowed or allocated amount of bandwidth) at some specific point in time. The purpose of it +// should be subconsciously obvious: if a class was allowed to transfer not less than the amount +// specified by its service curve, then the service curve is not violated. type ServiceCurve struct { m1 uint32 d uint32 @@ -144,6 +147,21 @@ func (c *ServiceCurve) Attrs() (uint32, uint32, uint32) { return c.m1, c.d, c.m2 } +// Burst returns the burst rate (m1) of the curve +func (c *ServiceCurve) Burst() uint32 { + return c.m1 +} + +// Delay return the delay (d) of the curve +func (c *ServiceCurve) Delay() uint32 { + return c.d +} + +// Rate returns the rate (m2) of the curve +func (c *ServiceCurve) Rate() uint32 { + return c.m2 +} + // HfscClass is a representation of the HFSC class type HfscClass struct { ClassAttrs @@ -152,35 +170,44 @@ type HfscClass struct { Usc ServiceCurve } -// SetUsc sets the Usc curve +// SetUsc sets the USC curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. func (hfsc *HfscClass) SetUsc(m1 uint32, d uint32, m2 uint32) { - hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2} } -// SetFsc sets the Fsc curve +// SetFsc sets the Fsc curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. func (hfsc *HfscClass) SetFsc(m1 uint32, d uint32, m2 uint32) { - hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2} } -// SetRsc sets the Rsc curve +// SetRsc sets the Rsc curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. func (hfsc *HfscClass) SetRsc(m1 uint32, d uint32, m2 uint32) { - hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2} } -// SetSC implements the SC from the tc CLI +// SetSC implements the SC from the `tc` CLI. This function behaves the same as if one would set the +// USC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. 
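A minimal sketch of the HFSC class API described above, assuming an hfsc root qdisc at handle 1: and an illustrative interface; note that rates are now passed in bits per second and converted to bytes only when the curves are serialized:

package main

import "github.com/vishvananda/netlink"

func main() {
    link, err := netlink.LinkByName("eth0")
    if err != nil {
        panic(err)
    }
    attrs := netlink.ClassAttrs{
        LinkIndex: link.Attrs().Index,
        Parent:    netlink.MakeHandle(1, 0),  // 1: (assumed hfsc root qdisc)
        Handle:    netlink.MakeHandle(1, 10), // 1:10
    }
    class := netlink.NewHfscClass(attrs)
    class.SetSC(10000000, 50, 5000000) // 10 Mbit for 50 ms, then 5 Mbit
    class.SetUL(20000000, 0, 20000000) // upper limit of 20 Mbit
    if err := netlink.ClassAdd(class); err != nil {
        panic(err)
    }
}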
func (hfsc *HfscClass) SetSC(m1 uint32, d uint32, m2 uint32) { - hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} - hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.SetRsc(m1, d, m2) + hfsc.SetFsc(m1, d, m2) } -// SetUL implements the UL from the tc CLI +// SetUL implements the UL from the `tc` CLI. This function behaves the same as if one would set the +// USC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. func (hfsc *HfscClass) SetUL(m1 uint32, d uint32, m2 uint32) { - hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.SetUsc(m1, d, m2) } -// SetLS implemtens the LS from the tc CLI +// SetLS implements the LS from the `tc` CLI. This function behaves the same as if one would set the +// USC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. func (hfsc *HfscClass) SetLS(m1 uint32, d uint32, m2 uint32) { - hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.SetFsc(m1, d, m2) } // NewHfscClass returns a new HFSC struct with the set parameters @@ -193,6 +220,7 @@ func NewHfscClass(attrs ClassAttrs) *HfscClass { } } +// String() returns a string that contains the information and attributes of the HFSC class func (hfsc *HfscClass) String() string { return fmt.Sprintf( "{%s -- {RSC: {m1=%d d=%d m2=%d}} {FSC: {m1=%d d=%d m2=%d}} {USC: {m1=%d d=%d m2=%d}}}", diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index 31091e501..e664ade7f 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -43,12 +43,12 @@ func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { if buffer == 0 { buffer = uint32(float64(rate)/Hz() + float64(mtu)) } - buffer = uint32(Xmittime(rate, buffer)) + buffer = Xmittime(rate, buffer) if cbuffer == 0 { cbuffer = uint32(float64(ceil)/Hz() + float64(mtu)) } - cbuffer = uint32(Xmittime(ceil, cbuffer)) + cbuffer = Xmittime(ceil, cbuffer) return &HtbClass{ ClassAttrs: attrs, @@ -56,9 +56,9 @@ func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { Ceil: ceil, Buffer: buffer, Cbuffer: cbuffer, - Quantum: 10, Level: 0, - Prio: 0, + Prio: cattrs.Prio, + Quantum: cattrs.Quantum, } } @@ -179,12 +179,15 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { case "hfsc": hfsc := class.(*HfscClass) opt := nl.HfscCopt{} - opt.Rsc.Set(hfsc.Rsc.Attrs()) - opt.Fsc.Set(hfsc.Fsc.Attrs()) - opt.Usc.Set(hfsc.Usc.Attrs()) - options.AddRtAttr(nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) - options.AddRtAttr(nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) - options.AddRtAttr(nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) + rm1, rd, rm2 := hfsc.Rsc.Attrs() + opt.Rsc.Set(rm1/8, rd, rm2/8) + fm1, fd, fm2 := hfsc.Fsc.Attrs() + opt.Fsc.Set(fm1/8, fd, fm2/8) + um1, ud, um2 := hfsc.Usc.Attrs() + opt.Usc.Set(um1/8, ud, um2/8) + nl.NewRtAttrChild(options, nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) + nl.NewRtAttrChild(options, nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) + nl.NewRtAttrChild(options, nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) } req.AddData(options) return nil @@ -315,11 +318,11 @@ func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, err m1, d, m2 := nl.DeserializeHfscCurve(datum.Value).Attrs() switch datum.Attr.Type { case nl.TCA_HFSC_RSC: - hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2} + hfsc.Rsc = 
ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} case nl.TCA_HFSC_FSC: - hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2} + hfsc.Fsc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} case nl.TCA_HFSC_USC: - hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2} + hfsc.Usc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} } } return detailed, nil diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index 5300d38bd..ab91f4e55 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -22,11 +22,7 @@ const ( // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2 ConntrackExpectTable = 2 ) -const ( - // For Parsing Mark - TCP_PROTO = 6 - UDP_PROTO = 17 -) + const ( // backward compatibility with golang 1.6 which does not have io.SeekCurrent seekCurrent = 1 @@ -223,6 +219,10 @@ func parseBERaw16(r *bytes.Reader, v *uint16) { binary.Read(r, binary.BigEndian, v) } +func parseBERaw32(r *bytes.Reader, v *uint32) { + binary.Read(r, binary.BigEndian, v) +} + func parseBERaw64(r *bytes.Reader, v *uint64) { binary.Read(r, binary.BigEndian, v) } @@ -241,9 +241,13 @@ func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { return } +func parseConnectionMark(r *bytes.Reader) (mark uint32) { + parseBERaw32(r, &mark) + return +} + func parseRawData(data []byte) *ConntrackFlow { s := &ConntrackFlow{} - var proto uint8 // First there is the Nfgenmsg header // consume only the family field reader := bytes.NewReader(data) @@ -263,7 +267,7 @@ func parseRawData(data []byte) *ConntrackFlow { switch t { case nl.CTA_TUPLE_ORIG: if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { - proto = parseIpTuple(reader, &s.Forward) + parseIpTuple(reader, &s.Forward) } case nl.CTA_TUPLE_REPLY: if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { @@ -277,19 +281,11 @@ func parseRawData(data []byte) *ConntrackFlow { case nl.CTA_COUNTERS_REPLY: s.Reverse.Bytes, s.Reverse.Packets = parseByteAndPacketCounters(reader) } - } - } - if proto == TCP_PROTO { - reader.Seek(64, seekCurrent) - _, t, _, v := parseNfAttrTLV(reader) - if t == nl.CTA_MARK { - s.Mark = uint32(v[3]) - } - } else if proto == UDP_PROTO { - reader.Seek(16, seekCurrent) - _, t, _, v := parseNfAttrTLV(reader) - if t == nl.CTA_MARK { - s.Mark = uint32(v[3]) + } else { + switch t { + case nl.CTA_MARK: + s.Mark = parseConnectionMark(reader) + } } } return s @@ -322,18 +318,25 @@ func parseRawData(data []byte) *ConntrackFlow { // --mask-src ip Source mask address // --mask-dst ip Destination mask address +// Layer 4 Protocol common parameters and options: +// TCP, UDP, SCTP, UDPLite and DCCP +// --sport, --orig-port-src port Source port in original direction +// --dport, --orig-port-dst port Destination port in original direction + // Filter types type ConntrackFilterType uint8 const ( - ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction - ConntrackOrigDstIP // -orig-dst ip Destination address from original direction - ConntrackReplySrcIP // --reply-src ip Reply Source IP - ConntrackReplyDstIP // --reply-dst ip Reply Destination IP - ConntrackReplyAnyIP // Match source or destination reply IP - ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP - ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP - ConntrackNatAnyIP = 
ConntrackReplyAnyIP // deprecated use instaed ConntrackReplyAnyIP + ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction + ConntrackOrigDstIP // -orig-dst ip Destination address from original direction + ConntrackReplySrcIP // --reply-src ip Reply Source IP + ConntrackReplyDstIP // --reply-dst ip Reply Destination IP + ConntrackReplyAnyIP // Match source or destination reply IP + ConntrackOrigSrcPort // --orig-port-src port Source port in original direction + ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction + ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP + ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP + ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP ) type CustomConntrackFilter interface { @@ -343,7 +346,9 @@ type CustomConntrackFilter interface { } type ConntrackFilter struct { - ipFilter map[ConntrackFilterType]net.IP + ipFilter map[ConntrackFilterType]net.IP + portFilter map[ConntrackFilterType]uint16 + protoFilter uint8 } // AddIP adds an IP to the conntrack filter @@ -358,38 +363,89 @@ func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error { return nil } +// AddPort adds a Port to the conntrack filter if the Layer 4 protocol allows it +func (f *ConntrackFilter) AddPort(tp ConntrackFilterType, port uint16) error { + switch f.protoFilter { + // TCP, UDP, DCCP, SCTP, UDPLite + case 6, 17, 33, 132, 136: + default: + return fmt.Errorf("Filter attribute not available without a valid Layer 4 protocol: %d", f.protoFilter) + } + + if f.portFilter == nil { + f.portFilter = make(map[ConntrackFilterType]uint16) + } + if _, ok := f.portFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.portFilter[tp] = port + return nil +} + +// AddProtocol adds the Layer 4 protocol to the conntrack filter +func (f *ConntrackFilter) AddProtocol(proto uint8) error { + if f.protoFilter != 0 { + return errors.New("Filter attribute already present") + } + f.protoFilter = proto + return nil +} + // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter // false otherwise func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { - if len(f.ipFilter) == 0 { + if len(f.ipFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 { // empty filter always not match return false } - match := true - // -orig-src ip Source address from original direction - if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found { - match = match && elem.Equal(flow.Forward.SrcIP) + // -p, --protonum proto Layer 4 Protocol, eg. 
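A sketch of the extended ConntrackFilter above, assuming the package's ConntrackTableList helper; note that AddProtocol must be called before AddPort, since port filters are only valid once a Layer 4 protocol is set. The IP and port are illustrative:

package main

import (
    "fmt"
    "net"

    "github.com/vishvananda/netlink"
)

func main() {
    f := &netlink.ConntrackFilter{}
    if err := f.AddProtocol(6); err != nil { // TCP
        panic(err)
    }
    if err := f.AddIP(netlink.ConntrackOrigSrcIP, net.ParseIP("198.51.100.7")); err != nil {
        panic(err)
    }
    if err := f.AddPort(netlink.ConntrackOrigDstPort, 443); err != nil {
        panic(err)
    }

    flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, netlink.FAMILY_V4)
    if err != nil {
        panic(err)
    }
    for _, flow := range flows {
        if f.MatchConntrackFlow(flow) {
            fmt.Printf("%s:%d -> %s:%d\n",
                flow.Forward.SrcIP, flow.Forward.SrcPort,
                flow.Forward.DstIP, flow.Forward.DstPort)
        }
    }
}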
'tcp' + if f.protoFilter != 0 && flow.Forward.Protocol != f.protoFilter { + // different Layer 4 protocol always not match + return false } - // -orig-dst ip Destination address from original direction - if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found { - match = match && elem.Equal(flow.Forward.DstIP) - } + match := true - // -src-nat ip Source NAT ip - if elem, found := f.ipFilter[ConntrackReplySrcIP]; match && found { - match = match && elem.Equal(flow.Reverse.SrcIP) - } + // IP conntrack filter + if len(f.ipFilter) > 0 { + // -orig-src ip Source address from original direction + if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found { + match = match && elem.Equal(flow.Forward.SrcIP) + } + + // -orig-dst ip Destination address from original direction + if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found { + match = match && elem.Equal(flow.Forward.DstIP) + } - // -dst-nat ip Destination NAT ip - if elem, found := f.ipFilter[ConntrackReplyDstIP]; match && found { - match = match && elem.Equal(flow.Reverse.DstIP) + // -src-nat ip Source NAT ip + if elem, found := f.ipFilter[ConntrackReplySrcIP]; match && found { + match = match && elem.Equal(flow.Reverse.SrcIP) + } + + // -dst-nat ip Destination NAT ip + if elem, found := f.ipFilter[ConntrackReplyDstIP]; match && found { + match = match && elem.Equal(flow.Reverse.DstIP) + } + + // Match source or destination reply IP + if elem, found := f.ipFilter[ConntrackReplyAnyIP]; match && found { + match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP)) + } } - // Match source or destination reply IP - if elem, found := f.ipFilter[ConntrackReplyAnyIP]; match && found { - match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP)) + // Layer 4 Port filter + if len(f.portFilter) > 0 { + // -orig-port-src port Source port from original direction + if elem, found := f.portFilter[ConntrackOrigSrcPort]; match && found { + match = match && elem == flow.Forward.SrcPort + } + + // -orig-port-dst port Destination port from original direction + if elem, found := f.portFilter[ConntrackOrigDstPort]; match && found { + match = match && elem == flow.Forward.DstPort + } } return match diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go new file mode 100644 index 000000000..29b3f8ec1 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -0,0 +1,272 @@ +package netlink + +import ( + "syscall" + + "fmt" + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// DevlinkDevEswitchAttr represents device's eswitch attributes +type DevlinkDevEswitchAttr struct { + Mode string + InlineMode string + EncapMode string +} + +// DevlinkDevAttrs represents device attributes +type DevlinkDevAttrs struct { + Eswitch DevlinkDevEswitchAttr +} + +// DevlinkDevice represents device and its attributes +type DevlinkDevice struct { + BusName string + DeviceName string + Attrs DevlinkDevAttrs +} + +func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) { + devices := make([]*DevlinkDevice, 0, len(msgs)) + for _, m := range msgs { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + dev := &DevlinkDevice{} + if err = dev.parseAttributes(attrs); err != nil { + return nil, err + } + devices = append(devices, dev) + } + return devices, nil +} + +func eswitchStringToMode(modeName string) (uint16, error) { + if modeName == "legacy" { + return 
nl.DEVLINK_ESWITCH_MODE_LEGACY, nil + } else if modeName == "switchdev" { + return nl.DEVLINK_ESWITCH_MODE_SWITCHDEV, nil + } else { + return 0xffff, fmt.Errorf("invalid switchdev mode") + } +} + +func parseEswitchMode(mode uint16) string { + var eswitchMode = map[uint16]string{ + nl.DEVLINK_ESWITCH_MODE_LEGACY: "legacy", + nl.DEVLINK_ESWITCH_MODE_SWITCHDEV: "switchdev", + } + if eswitchMode[mode] == "" { + return "unknown" + } else { + return eswitchMode[mode] + } +} + +func parseEswitchInlineMode(inlinemode uint8) string { + var eswitchInlineMode = map[uint8]string{ + nl.DEVLINK_ESWITCH_INLINE_MODE_NONE: "none", + nl.DEVLINK_ESWITCH_INLINE_MODE_LINK: "link", + nl.DEVLINK_ESWITCH_INLINE_MODE_NETWORK: "network", + nl.DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: "transport", + } + if eswitchInlineMode[inlinemode] == "" { + return "unknown" + } else { + return eswitchInlineMode[inlinemode] + } +} + +func parseEswitchEncapMode(encapmode uint8) string { + var eswitchEncapMode = map[uint8]string{ + nl.DEVLINK_ESWITCH_ENCAP_MODE_NONE: "disable", + nl.DEVLINK_ESWITCH_ENCAP_MODE_BASIC: "enable", + } + if eswitchEncapMode[encapmode] == "" { + return "unknown" + } else { + return eswitchEncapMode[encapmode] + } +} + +func (d *DevlinkDevice) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + for _, a := range attrs { + switch a.Attr.Type { + case nl.DEVLINK_ATTR_BUS_NAME: + d.BusName = string(a.Value) + case nl.DEVLINK_ATTR_DEV_NAME: + d.DeviceName = string(a.Value) + case nl.DEVLINK_ATTR_ESWITCH_MODE: + d.Attrs.Eswitch.Mode = parseEswitchMode(native.Uint16(a.Value)) + case nl.DEVLINK_ATTR_ESWITCH_INLINE_MODE: + d.Attrs.Eswitch.InlineMode = parseEswitchInlineMode(uint8(a.Value[0])) + case nl.DEVLINK_ATTR_ESWITCH_ENCAP_MODE: + d.Attrs.Eswitch.EncapMode = parseEswitchEncapMode(uint8(a.Value[0])) + } + } + return nil +} + +func (dev *DevlinkDevice) parseEswitchAttrs(msgs [][]byte) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return + } + dev.parseAttributes(attrs) +} + +func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_ESWITCH_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(family.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK) + req.AddData(msg) + + b := make([]byte, len(dev.BusName)) + copy(b, dev.BusName) + data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) + req.AddData(data) + + b = make([]byte, len(dev.DeviceName)) + copy(b, dev.DeviceName) + data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) + req.AddData(data) + + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return + } + dev.parseEswitchAttrs(msgs) +} + +// DevLinkGetDeviceList provides a pointer to devlink devices and nil error, +// otherwise returns an error code. 
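The devlink helpers added in this file combine roughly as follows; DevLinkGetDeviceList, DevLinkGetDeviceByName and DevLinkSetEswitchMode are defined just below, and the bus/device names are illustrative:

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
)

func main() {
    devs, err := netlink.DevLinkGetDeviceList()
    if err != nil {
        panic(err)
    }
    for _, d := range devs {
        fmt.Printf("%s/%s eswitch mode=%s\n", d.BusName, d.DeviceName, d.Attrs.Eswitch.Mode)
    }

    // Equivalent to: `devlink dev eswitch set pci/0000:03:00.0 mode switchdev`
    dev, err := netlink.DevLinkGetDeviceByName("pci", "0000:03:00.0")
    if err != nil {
        panic(err)
    }
    if err := netlink.DevLinkSetEswitchMode(dev, "switchdev"); err != nil {
        panic(err)
    }
}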
+func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, err + } + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) + req.AddData(msg) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + devices, err := parseDevLinkDeviceList(msgs) + if err != nil { + return nil, err + } + for _, d := range devices { + h.getEswitchAttrs(f, d) + } + return devices, nil +} + +// DevLinkGetDeviceList provides a pointer to devlink devices and nil error, +// otherwise returns an error code. +func DevLinkGetDeviceList() ([]*DevlinkDevice, error) { + return pkgHandle.DevLinkGetDeviceList() +} + +func parseDevlinkDevice(msgs [][]byte) (*DevlinkDevice, error) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + dev := &DevlinkDevice{} + if err = dev.parseAttributes(attrs); err != nil { + return nil, err + } + return dev, nil +} + +func (h *Handle) createCmdReq(cmd uint8, bus string, device string) (*GenlFamily, *nl.NetlinkRequest, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, nil, err + } + + msg := &nl.Genlmsg{ + Command: cmd, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK) + req.AddData(msg) + + b := make([]byte, len(bus)+1) + copy(b, bus) + data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) + req.AddData(data) + + b = make([]byte, len(device)+1) + copy(b, device) + data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) + req.AddData(data) + + return f, req, nil +} + +// DevlinkGetDeviceByName provides a pointer to devlink device and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) { + f, req, err := h.createCmdReq(nl.DEVLINK_CMD_GET, Bus, Device) + if err != nil { + return nil, err + } + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + dev, err := parseDevlinkDevice(respmsg) + if err == nil { + h.getEswitchAttrs(f, dev) + } + return dev, err +} + +// DevlinkGetDeviceByName provides a pointer to devlink device and nil error, +// otherwise returns an error code. +func DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) { + return pkgHandle.DevLinkGetDeviceByName(Bus, Device) +} + +// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or +// returns an error code. +// Equivalent to: `devlink dev eswitch set $dev mode switchdev` +// Equivalent to: `devlink dev eswitch set $dev mode legacy` +func (h *Handle) DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { + mode, err := eswitchStringToMode(NewMode) + if err != nil { + return err + } + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_ESWITCH_SET, Dev.BusName, Dev.DeviceName) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_ESWITCH_MODE, nl.Uint16Attr(mode))) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or +// returns an error code. 
+// Equivalent to: `devlink dev eswitch set $dev mode switchdev` +// Equivalent to: `devlink dev eswitch set $dev mode legacy` +func DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { + return pkgHandle.DevLinkSetEswitchMode(Dev, NewMode) +} diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index cfdf79a08..2dc34b995 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -2,6 +2,7 @@ package netlink import ( "fmt" + "net" ) type Filter interface { @@ -135,6 +136,27 @@ func (action *BpfAction) Attrs() *ActionAttrs { return &action.ActionAttrs } +type ConnmarkAction struct { + ActionAttrs + Zone uint16 +} + +func (action *ConnmarkAction) Type() string { + return "connmark" +} + +func (action *ConnmarkAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewConnmarkAction() *ConnmarkAction { + return &ConnmarkAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + type MirredAct uint8 func (a MirredAct) String() string { @@ -182,49 +204,60 @@ func NewMirredAction(redirIndex int) *MirredAction { } } -// Sel of the U32 filters that contains multiple TcU32Key. This is the copy -// and the frontend representation of nl.TcU32Sel. It is serialized into canonical -// nl.TcU32Sel with the appropriate endianness. -type TcU32Sel struct { - Flags uint8 - Offshift uint8 - Nkeys uint8 - Pad uint8 - Offmask uint16 - Off uint16 - Offoff int16 - Hoff int16 - Hmask uint32 - Keys []TcU32Key -} - -// TcU32Key contained of Sel in the U32 filters. This is the copy and the frontend -// representation of nl.TcU32Key. It is serialized into chanonical nl.TcU32Sel -// with the appropriate endianness. -type TcU32Key struct { - Mask uint32 - Val uint32 - Off int32 - OffMask int32 -} - -// U32 filters on many packet related properties -type U32 struct { - FilterAttrs - ClassId uint32 - Divisor uint32 // Divisor MUST be power of 2. 
- Hash uint32 - RedirIndex int - Sel *TcU32Sel - Actions []Action +type TunnelKeyAct int8 + +const ( + TCA_TUNNEL_KEY_SET TunnelKeyAct = 1 // set tunnel key + TCA_TUNNEL_KEY_UNSET TunnelKeyAct = 2 // unset tunnel key +) + +type TunnelKeyAction struct { + ActionAttrs + Action TunnelKeyAct + SrcAddr net.IP + DstAddr net.IP + KeyID uint32 + DestPort uint16 } -func (filter *U32) Attrs() *FilterAttrs { - return &filter.FilterAttrs +func (action *TunnelKeyAction) Type() string { + return "tunnel_key" } -func (filter *U32) Type() string { - return "u32" +func (action *TunnelKeyAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewTunnelKeyAction() *TunnelKeyAction { + return &TunnelKeyAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + +type SkbEditAction struct { + ActionAttrs + QueueMapping *uint16 + PType *uint16 + Priority *uint32 + Mark *uint32 +} + +func (action *SkbEditAction) Type() string { + return "skbedit" +} + +func (action *SkbEditAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewSkbEditAction() *SkbEditAction { + return &SkbEditAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } } // MatchAll filters match all packets @@ -264,6 +297,8 @@ type BpfFilter struct { Fd int Name string DirectAction bool + Id int + Tag string } func (filter *BpfFilter) Type() string { diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index 90084fae8..ef6fabe81 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -3,10 +3,10 @@ package netlink import ( "bytes" "encoding/binary" + "encoding/hex" "errors" "fmt" "syscall" - "unsafe" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -20,6 +20,35 @@ const ( TC_U32_EAT = nl.TC_U32_EAT ) +// Sel of the U32 filters that contains multiple TcU32Key. This is the type +// alias and the frontend representation of nl.TcU32Sel. It is serialized into +// canonical nl.TcU32Sel with the appropriate endianness. +type TcU32Sel = nl.TcU32Sel + +// TcU32Key contained of Sel in the U32 filters. This is the type alias and the +// frontend representation of nl.TcU32Key. It is serialized into chanonical +// nl.TcU32Sel with the appropriate endianness. +type TcU32Key = nl.TcU32Key + +// U32 filters on many packet related properties +type U32 struct { + FilterAttrs + ClassId uint32 + Divisor uint32 // Divisor MUST be power of 2. + Hash uint32 + RedirIndex int + Sel *TcU32Sel + Actions []Action +} + +func (filter *U32) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *U32) Type() string { + return "u32" +} + // Fw filter filters on firewall marks // NOTE: this is in filter_linux because it refers to nl.TcPolice which // is defined in nl/tc_linux.go @@ -59,7 +88,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("TBF: failed to calculate rate table") } - police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) + police.Burst = Xmittime(uint64(police.Rate.Rate), uint32(buffer)) } police.Mtu = fattrs.Mtu if police.PeakRate.Rate != 0 { @@ -123,8 +152,24 @@ func FilterAdd(filter Filter) error { // FilterAdd will add a filter to the system. 
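With TcU32Sel/TcU32Key now direct aliases of the nl types, a nil Sel still means match-all. A sketch of the common ingress-redirect setup built on that, assuming an ingress qdisc is already installed and using illustrative device names:

package main

import (
    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    src, err := netlink.LinkByName("eth0")
    if err != nil {
        panic(err)
    }
    dst, err := netlink.LinkByName("ifb0")
    if err != nil {
        panic(err)
    }
    filter := &netlink.U32{
        FilterAttrs: netlink.FilterAttrs{
            LinkIndex: src.Attrs().Index,
            Parent:    netlink.MakeHandle(0xffff, 0), // ffff: ingress
            Protocol:  unix.ETH_P_ALL,
        },
        // Sel is nil, so a match-all selector is generated when the filter is added.
        Actions: []netlink.Action{netlink.NewMirredAction(dst.Attrs().Index)},
    }
    if err := netlink.FilterAdd(filter); err != nil {
        panic(err)
    }
}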
// Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { + return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL) +} + +// FilterReplace will replace a filter. +// Equivalent to: `tc filter replace $filter` +func FilterReplace(filter Filter) error { + return pkgHandle.FilterReplace(filter) +} + +// FilterReplace will replace a filter. +// Equivalent to: `tc filter replace $filter` +func (h *Handle) FilterReplace(filter Filter) error { + return h.filterModify(filter, unix.NLM_F_CREATE) +} + +func (h *Handle) filterModify(filter Filter, flags int) error { native = nl.NativeEndian() - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -140,8 +185,7 @@ func (h *Handle) FilterAdd(filter Filter) error { switch filter := filter.(type) { case *U32: - // Convert TcU32Sel into nl.TcU32Sel as it is without copy. - sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel)) + sel := filter.Sel if sel == nil { // match all sel = &nl.TcU32Sel{ @@ -385,6 +429,66 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { } toTcGen(action.Attrs(), &mirred.TcGen) aopts.AddRtAttr(nl.TCA_MIRRED_PARMS, mirred.Serialize()) + case *TunnelKeyAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("tunnel_key")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + tun := nl.TcTunnelKey{ + Action: int32(action.Action), + } + toTcGen(action.Attrs(), &tun.TcGen) + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_PARMS, tun.Serialize()) + if action.Action == TCA_TUNNEL_KEY_SET { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_KEY_ID, htonl(action.KeyID)) + if v4 := action.SrcAddr.To4(); v4 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC, v4[:]) + } else if v6 := action.SrcAddr.To16(); v6 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, v6[:]) + } else { + return fmt.Errorf("invalid src addr %s for tunnel_key action", action.SrcAddr) + } + if v4 := action.DstAddr.To4(); v4 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_DST, v4[:]) + } else if v6 := action.DstAddr.To16(); v6 != nil { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, v6[:]) + } else { + return fmt.Errorf("invalid dst addr %s for tunnel_key action", action.DstAddr) + } + if action.DestPort != 0 { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_DST_PORT, htons(action.DestPort)) + } + } + case *SkbEditAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("skbedit")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + skbedit := nl.TcSkbEdit{} + toTcGen(action.Attrs(), &skbedit.TcGen) + aopts.AddRtAttr(nl.TCA_SKBEDIT_PARMS, skbedit.Serialize()) + if action.QueueMapping != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_QUEUE_MAPPING, nl.Uint16Attr(*action.QueueMapping)) + } + if action.Priority != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_PRIORITY, nl.Uint32Attr(*action.Priority)) + } + if action.PType != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_PTYPE, nl.Uint16Attr(*action.PType)) + } + if action.Mark != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark)) + } + case *ConnmarkAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("connmark")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + connmark := nl.TcConnmark{ + Zone: action.Zone, + } + 
toTcGen(action.Attrs(), &connmark.TcGen) + aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize()) case *BpfAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -428,8 +532,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &MirredAction{} case "bpf": action = &BpfAction{} + case "connmark": + action = &ConnmarkAction{} case "gact": action = &GenericAction{} + case "tunnel_key": + action = &TunnelKeyAction{} + case "skbedit": + action = &SkbEditAction{} default: break nextattr } @@ -444,11 +554,46 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { switch adatum.Attr.Type { case nl.TCA_MIRRED_PARMS: mirred := *nl.DeserializeTcMirred(adatum.Value) - toAttrs(&mirred.TcGen, action.Attrs()) action.(*MirredAction).ActionAttrs = ActionAttrs{} + toAttrs(&mirred.TcGen, action.Attrs()) action.(*MirredAction).Ifindex = int(mirred.Ifindex) action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction) } + case "tunnel_key": + switch adatum.Attr.Type { + case nl.TCA_TUNNEL_KEY_PARMS: + tun := *nl.DeserializeTunnelKey(adatum.Value) + action.(*TunnelKeyAction).ActionAttrs = ActionAttrs{} + toAttrs(&tun.TcGen, action.Attrs()) + action.(*TunnelKeyAction).Action = TunnelKeyAct(tun.Action) + case nl.TCA_TUNNEL_KEY_ENC_KEY_ID: + action.(*TunnelKeyAction).KeyID = networkOrder.Uint32(adatum.Value[0:4]) + case nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC: + action.(*TunnelKeyAction).SrcAddr = adatum.Value[:] + case nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, nl.TCA_TUNNEL_KEY_ENC_IPV4_DST: + action.(*TunnelKeyAction).DstAddr = adatum.Value[:] + case nl.TCA_TUNNEL_KEY_ENC_DST_PORT: + action.(*TunnelKeyAction).DestPort = ntohs(adatum.Value) + } + case "skbedit": + switch adatum.Attr.Type { + case nl.TCA_SKBEDIT_PARMS: + skbedit := *nl.DeserializeSkbEdit(adatum.Value) + action.(*SkbEditAction).ActionAttrs = ActionAttrs{} + toAttrs(&skbedit.TcGen, action.Attrs()) + case nl.TCA_SKBEDIT_MARK: + mark := native.Uint32(adatum.Value[0:4]) + action.(*SkbEditAction).Mark = &mark + case nl.TCA_SKBEDIT_PRIORITY: + priority := native.Uint32(adatum.Value[0:4]) + action.(*SkbEditAction).Priority = &priority + case nl.TCA_SKBEDIT_PTYPE: + ptype := native.Uint16(adatum.Value[0:2]) + action.(*SkbEditAction).PType = &ptype + case nl.TCA_SKBEDIT_QUEUE_MAPPING: + mapping := native.Uint16(adatum.Value[0:2]) + action.(*SkbEditAction).QueueMapping = &mapping + } case "bpf": switch adatum.Attr.Type { case nl.TCA_ACT_BPF_PARMS: @@ -459,6 +604,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_ACT_BPF_NAME: action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1]) } + case "connmark": + switch adatum.Attr.Type { + case nl.TCA_CONNMARK_PARMS: + connmark := *nl.DeserializeTcConnmark(adatum.Value) + action.(*ConnmarkAction).ActionAttrs = ActionAttrs{} + toAttrs(&connmark.TcGen, action.Attrs()) + action.(*ConnmarkAction).Zone = connmark.Zone + } case "gact": switch adatum.Attr.Type { case nl.TCA_GACT_PARMS: @@ -483,7 +636,7 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) case nl.TCA_U32_SEL: detailed = true sel := nl.DeserializeTcU32Sel(datum.Value) - u32.Sel = (*TcU32Sel)(unsafe.Pointer(sel)) + u32.Sel = sel if native != networkOrder { // Handle the endianness of attributes u32.Sel.Offmask = native.Uint16(htons(sel.Offmask)) @@ -564,6 +717,10 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 { 
bpf.DirectAction = true } + case nl.TCA_BPF_ID: + bpf.Id = int(native.Uint32(datum.Value[0:4])) + case nl.TCA_BPF_TAG: + bpf.Tag = hex.EncodeToString(datum.Value[:len(datum.Value)-1]) } } return detailed, nil @@ -628,7 +785,7 @@ func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, lin } for i := 0; i < 256; i++ { sz = AdjustSize(uint((i+1)<= nl.IPSET_ERR_PRIVATE { + err = nl.IPSetError(uintptr(errno)) + } + } + return +} + +func ipsetUnserialize(msgs [][]byte) (result IPSetResult) { + for _, msg := range msgs { + result.unserialize(msg) + } + return result +} + +func (result *IPSetResult) unserialize(msg []byte) { + result.Nfgenmsg = nl.DeserializeNfgenmsg(msg) + + for attr := range nl.ParseAttributes(msg[4:]) { + switch attr.Type { + case nl.IPSET_ATTR_PROTOCOL: + result.Protocol = attr.Value[0] + case nl.IPSET_ATTR_SETNAME: + result.SetName = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_TYPENAME: + result.TypeName = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_REVISION: + result.Revision = attr.Value[0] + case nl.IPSET_ATTR_FAMILY: + result.Family = attr.Value[0] + case nl.IPSET_ATTR_FLAGS: + result.Flags = attr.Value[0] + case nl.IPSET_ATTR_DATA | nl.NLA_F_NESTED: + result.parseAttrData(attr.Value) + case nl.IPSET_ATTR_ADT | nl.NLA_F_NESTED: + result.parseAttrADT(attr.Value) + default: + log.Printf("unknown ipset attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) + } + } +} + +func (result *IPSetResult) parseAttrData(data []byte) { + for attr := range nl.ParseAttributes(data) { + switch attr.Type { + case nl.IPSET_ATTR_HASHSIZE | nl.NLA_F_NET_BYTEORDER: + result.HashSize = attr.Uint32() + case nl.IPSET_ATTR_MAXELEM | nl.NLA_F_NET_BYTEORDER: + result.MaxElements = attr.Uint32() + case nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint32() + result.Timeout = &val + case nl.IPSET_ATTR_ELEMENTS | nl.NLA_F_NET_BYTEORDER: + result.NumEntries = attr.Uint32() + case nl.IPSET_ATTR_REFERENCES | nl.NLA_F_NET_BYTEORDER: + result.References = attr.Uint32() + case nl.IPSET_ATTR_MEMSIZE | nl.NLA_F_NET_BYTEORDER: + result.SizeInMemory = attr.Uint32() + case nl.IPSET_ATTR_CADT_FLAGS | nl.NLA_F_NET_BYTEORDER: + result.CadtFlags = attr.Uint32() + default: + log.Printf("unknown ipset data attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) + } + } +} + +func (result *IPSetResult) parseAttrADT(data []byte) { + for attr := range nl.ParseAttributes(data) { + switch attr.Type { + case nl.IPSET_ATTR_DATA | nl.NLA_F_NESTED: + result.Entries = append(result.Entries, parseIPSetEntry(attr.Value)) + default: + log.Printf("unknown ADT attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) + } + } +} + +func parseIPSetEntry(data []byte) (entry IPSetEntry) { + for attr := range nl.ParseAttributes(data) { + switch attr.Type { + case nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint32() + entry.Timeout = &val + case nl.IPSET_ATTR_BYTES | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint64() + entry.Bytes = &val + case nl.IPSET_ATTR_PACKETS | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint64() + entry.Packets = &val + case nl.IPSET_ATTR_ETHER: + entry.MAC = net.HardwareAddr(attr.Value) + case nl.IPSET_ATTR_COMMENT: + entry.Comment = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: + for attr := range nl.ParseAttributes(attr.Value) { + switch attr.Type { + case nl.IPSET_ATTR_IP: + entry.IP = net.IP(attr.Value) + default: + log.Printf("unknown nested ADT attribute from kernel: %+v", attr) + } + } + default: 
+ log.Printf("unknown ADT attribute from kernel: %+v", attr) + } + } + return +} diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index bbe6d0d0f..e2441bd71 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -4,6 +4,7 @@ import ( "fmt" "net" "os" + "strconv" ) // Link represents a link device from netlink. Shared link attributes @@ -41,7 +42,16 @@ type LinkAttrs struct { NetNsID int NumTxQueues int NumRxQueues int + GSOMaxSize uint32 + GSOMaxSegs uint32 Vfs []VfInfo // virtual functions available on link + Group uint32 + Slave LinkSlave +} + +// LinkSlave represents a slave device. +type LinkSlave interface { + SlaveType() string } // VfInfo represents configuration of virtual function @@ -50,9 +60,22 @@ type VfInfo struct { Mac net.HardwareAddr Vlan int Qos int - TxRate int + TxRate int // IFLA_VF_TX_RATE Max TxRate Spoofchk bool LinkState uint32 + MaxTxRate uint32 // IFLA_VF_RATE Max TxRate + MinTxRate uint32 // IFLA_VF_RATE Min TxRate + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + Multicast uint64 + Broadcast uint64 + RxDropped uint64 + TxDropped uint64 + + RssQuery uint32 + Trust uint32 } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -91,7 +114,8 @@ func (s LinkOperState) String() string { // NewLinkAttrs returns LinkAttrs structure filled with default values func NewLinkAttrs() LinkAttrs { return LinkAttrs{ - TxQLen: -1, + NetNsID: -1, + TxQLen: -1, } } @@ -184,10 +208,11 @@ type LinkStatistics64 struct { } type LinkXdp struct { - Fd int - Attached bool - Flags uint32 - ProgId uint32 + Fd int + Attached bool + AttachMode uint32 + Flags uint32 + ProgId uint32 } // Device links cannot be created via netlink. These links @@ -234,6 +259,7 @@ func (ifb *Ifb) Type() string { type Bridge struct { LinkAttrs MulticastSnooping *bool + AgeingTime *uint32 HelloTime *uint32 VlanFiltering *bool } @@ -249,7 +275,8 @@ func (bridge *Bridge) Type() string { // Vlan links have ParentIndex set in their Attrs() type Vlan struct { LinkAttrs - VlanId int + VlanId int + VlanProtocol VlanProtocol } func (vlan *Vlan) Attrs() *LinkAttrs { @@ -308,6 +335,8 @@ type Tuntap struct { NonPersist bool Queues int Fds []*os.File + Owner uint32 + Group uint32 } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -321,7 +350,9 @@ func (tuntap *Tuntap) Type() string { // Veth devices must specify PeerName on create type Veth struct { LinkAttrs - PeerName string // veth on create only + PeerName string // veth on create only + PeerHardwareAddr net.HardwareAddr + PeerNamespace interface{} } func (veth *Veth) Attrs() *LinkAttrs { @@ -332,6 +363,19 @@ func (veth *Veth) Type() string { return "veth" } +// Wireguard represent links of type "wireguard", see https://www.wireguard.com/ +type Wireguard struct { + LinkAttrs +} + +func (wg *Wireguard) Attrs() *LinkAttrs { + return &wg.LinkAttrs +} + +func (wg *Wireguard) Type() string { + return "wireguard" +} + // GenericLink links represent types that are not currently understood // by this netlink library. 
type GenericLink struct { @@ -390,9 +434,18 @@ const ( IPVLAN_MODE_MAX ) +type IPVlanFlag uint16 + +const ( + IPVLAN_FLAG_BRIDGE IPVlanFlag = iota + IPVLAN_FLAG_PRIVATE + IPVLAN_FLAG_VEPA +) + type IPVlan struct { LinkAttrs Mode IPVlanMode + Flag IPVlanFlag } func (ipvlan *IPVlan) Attrs() *LinkAttrs { @@ -403,6 +456,43 @@ func (ipvlan *IPVlan) Type() string { return "ipvlan" } +// VlanProtocol type +type VlanProtocol int + +func (p VlanProtocol) String() string { + s, ok := VlanProtocolToString[p] + if !ok { + return fmt.Sprintf("VlanProtocol(%d)", p) + } + return s +} + +// StringToVlanProtocol returns vlan protocol, or unknown is the s is invalid. +func StringToVlanProtocol(s string) VlanProtocol { + mode, ok := StringToVlanProtocolMap[s] + if !ok { + return VLAN_PROTOCOL_UNKNOWN + } + return mode +} + +// VlanProtocol possible values +const ( + VLAN_PROTOCOL_UNKNOWN VlanProtocol = 0 + VLAN_PROTOCOL_8021Q VlanProtocol = 0x8100 + VLAN_PROTOCOL_8021AD VlanProtocol = 0x88A8 +) + +var VlanProtocolToString = map[VlanProtocol]string{ + VLAN_PROTOCOL_8021Q: "802.1q", + VLAN_PROTOCOL_8021AD: "802.1ad", +} + +var StringToVlanProtocolMap = map[string]VlanProtocol{ + "802.1q": VLAN_PROTOCOL_8021Q, + "802.1ad": VLAN_PROTOCOL_8021AD, +} + // BondMode type type BondMode int @@ -414,7 +504,7 @@ func (b BondMode) String() string { return s } -// StringToBondMode returns bond mode, or uknonw is the s is invalid. +// StringToBondMode returns bond mode, or unknown is the s is invalid. func StringToBondMode(s string) BondMode { mode, ok := StringToBondModeMap[s] if !ok { @@ -505,7 +595,7 @@ func (b BondXmitHashPolicy) String() string { return s } -// StringToBondXmitHashPolicy returns bond lacp arte, or uknonw is the s is invalid. +// StringToBondXmitHashPolicy returns bond lacp arte, or unknown is the s is invalid. func StringToBondXmitHashPolicy(s string) BondXmitHashPolicy { lacp, ok := StringToBondXmitHashPolicyMap[s] if !ok { @@ -550,7 +640,7 @@ func (b BondLacpRate) String() string { return s } -// StringToBondLacpRate returns bond lacp arte, or uknonw is the s is invalid. +// StringToBondLacpRate returns bond lacp arte, or unknown is the s is invalid. func StringToBondLacpRate(s string) BondLacpRate { lacp, ok := StringToBondLacpRateMap[s] if !ok { @@ -694,6 +784,67 @@ func (bond *Bond) Type() string { return "bond" } +// BondSlaveState represents the values of the IFLA_BOND_SLAVE_STATE bond slave +// attribute, which contains the state of the bond slave. +type BondSlaveState uint8 + +const ( + BondStateActive = iota // Link is active. + BondStateBackup // Link is backup. +) + +func (s BondSlaveState) String() string { + switch s { + case BondStateActive: + return "ACTIVE" + case BondStateBackup: + return "BACKUP" + default: + return strconv.Itoa(int(s)) + } +} + +// BondSlaveState represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave +// attribute, which contains the status of MII link monitoring +type BondSlaveMiiStatus uint8 + +const ( + BondLinkUp = iota // link is up and running. + BondLinkFail // link has just gone down. + BondLinkDown // link has been down for too long time. + BondLinkBack // link is going back. 
+) + +func (s BondSlaveMiiStatus) String() string { + switch s { + case BondLinkUp: + return "UP" + case BondLinkFail: + return "GOING_DOWN" + case BondLinkDown: + return "DOWN" + case BondLinkBack: + return "GOING_BACK" + default: + return strconv.Itoa(int(s)) + } +} + +type BondSlave struct { + State BondSlaveState + MiiStatus BondSlaveMiiStatus + LinkFailureCount uint32 + PermHardwareAddr net.HardwareAddr + QueueId uint16 + AggregatorId uint16 + AdActorOperPortState uint8 + AdPartnerOperPortState uint16 +} + +func (b *BondSlave) SlaveType() string { + return "bond" +} + // Gretap devices must specify LocalIP and RemoteIP on create type Gretap struct { LinkAttrs @@ -748,14 +899,41 @@ func (iptun *Iptun) Type() string { return "ipip" } -type Sittun struct { +type Ip6tnl struct { LinkAttrs Link uint32 Local net.IP Remote net.IP Ttl uint8 Tos uint8 + Flags uint32 + Proto uint8 + FlowInfo uint32 + EncapLimit uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 +} + +func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs { + return &ip6tnl.LinkAttrs +} + +func (ip6tnl *Ip6tnl) Type() string { + return "ip6tnl" +} + +type Sittun struct { + LinkAttrs + Link uint32 + Ttl uint8 + Tos uint8 PMtuDisc uint8 + Proto uint8 + Local net.IP + Remote net.IP + EncapLimit uint8 EncapType uint16 EncapFlags uint16 EncapSport uint16 @@ -848,11 +1026,68 @@ func (gtp *GTP) Type() string { return "gtp" } +// Virtual XFRM Interfaces +// Named "xfrmi" to prevent confusion with XFRM objects +type Xfrmi struct { + LinkAttrs + Ifid uint32 +} + +func (xfrm *Xfrmi) Attrs() *LinkAttrs { + return &xfrm.LinkAttrs +} + +func (xfrm *Xfrmi) Type() string { + return "xfrm" +} + +// IPoIB interface + +type IPoIBMode uint16 + +func (m *IPoIBMode) String() string { + str, ok := iPoIBModeToString[*m] + if !ok { + return fmt.Sprintf("mode(%d)", *m) + } + return str +} + +const ( + IPOIB_MODE_DATAGRAM = iota + IPOIB_MODE_CONNECTED +) + +var iPoIBModeToString = map[IPoIBMode]string{ + IPOIB_MODE_DATAGRAM: "datagram", + IPOIB_MODE_CONNECTED: "connected", +} + +var StringToIPoIBMode = map[string]IPoIBMode{ + "datagram": IPOIB_MODE_DATAGRAM, + "connected": IPOIB_MODE_CONNECTED, +} + +type IPoIB struct { + LinkAttrs + Pkey uint16 + Mode IPoIBMode + Umcast uint16 +} + +func (ipoib *IPoIB) Attrs() *LinkAttrs { + return &ipoib.LinkAttrs +} + +func (ipoib *IPoIB) Type() string { + return "ipoib" +} + // iproute2 supported devices; // vlan | veth | vcan | dummy | ifb | macvlan | macvtap | // bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | // gre | gretap | ip6gre | ip6gretap | vti | vti6 | nlmon | -// bond_slave | ipvlan +// bond_slave | ipvlan | xfrm // LinkNotFoundError wraps the various not found errors when // getting/reading links. 
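// Editor's illustrative sketch (not part of the vendored diff): the BondSlave type
// above, together with the new LinkAttrs.Slave field, exposes per-slave bonding
// state, and Xfrmi models the new "xfrm" link kind. Interface names and the if_id
// value are hypothetical; assumes imports "fmt" and "github.com/vishvananda/netlink".
func examplePrintBondSlaveState() error {
	link, err := netlink.LinkByName("eth1")
	if err != nil {
		return err
	}
	if slave, ok := link.Attrs().Slave.(*netlink.BondSlave); ok {
		fmt.Printf("state=%s mii=%s failures=%d\n",
			slave.State, slave.MiiStatus, slave.LinkFailureCount)
	}
	return nil
}

func exampleAddXfrmi(parent netlink.Link) error {
	la := netlink.NewLinkAttrs()
	la.Name = "xfrm0"
	la.ParentIndex = parent.Attrs().Index
	return netlink.LinkAdd(&netlink.Xfrmi{LinkAttrs: la, Ifid: 42})
}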
This is intended for better error diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index 4c638c90d..c02fa63b8 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -4,8 +4,10 @@ import ( "bytes" "encoding/binary" "fmt" + "io/ioutil" "net" "os" + "strconv" "strings" "syscall" "unsafe" @@ -17,7 +19,7 @@ import ( const ( SizeofLinkStats32 = 0x5c - SizeofLinkStats64 = 0xd8 + SizeofLinkStats64 = 0xb8 ) const ( @@ -32,6 +34,12 @@ const ( TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI ) +const ( + VF_LINK_STATE_AUTO uint32 = 0 + VF_LINK_STATE_ENABLE uint32 = 1 + VF_LINK_STATE_DISABLE uint32 = 2 +) + var lookupByDump = false var macvlanModes = [...]uint32{ @@ -229,6 +237,37 @@ func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode return err } +// LinkSetMacvlanMode sets the mode of a macvlan or macvtap link device. +// Note that passthrough mode cannot be set to and from and will fail. +// Equivalent to: `ip link set $link type (macvlan|macvtap) mode $mode +func LinkSetMacvlanMode(link Link, mode MacvlanMode) error { + return pkgHandle.LinkSetMacvlanMode(link, mode) +} + +// LinkSetMacvlanMode sets the mode of the macvlan or macvtap link device. +// Note that passthrough mode cannot be set to and from and will fail. +// Equivalent to: `ip link set $link type (macvlan|macvtap) mode $mode +func (h *Handle) LinkSetMacvlanMode(link Link, mode MacvlanMode) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[mode])) + + req.AddData(linkInfo) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func BridgeSetMcastSnoop(link Link, on bool) error { return pkgHandle.BridgeSetMcastSnoop(link, on) } @@ -239,6 +278,16 @@ func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { return h.linkModify(bridge, unix.NLM_F_ACK) } +func BridgeSetVlanFiltering(link Link, on bool) error { + return pkgHandle.BridgeSetVlanFiltering(link, on) +} + +func (h *Handle) BridgeSetVlanFiltering(link Link, on bool) error { + bridge := link.(*Bridge) + bridge.VlanFiltering = &on + return h.linkModify(bridge, unix.NLM_F_ACK) +} + func SetPromiscOn(link Link) error { return pkgHandle.SetPromiscOn(link) } @@ -465,6 +514,37 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { return err } +// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos` +func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return pkgHandle.LinkSetVfVlanQos(link, vf, vlan, qos) +} + +// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link. 
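// Editor's illustrative sketch (not part of the vendored diff): basic use of the
// LinkSetMacvlanMode and BridgeSetVlanFiltering helpers added above. "macvlan0" and
// "br0" are hypothetical names; BridgeSetVlanFiltering expects the link to actually
// be a *Bridge. Assumes import "github.com/vishvananda/netlink".
func exampleSetMacvlanMode() error {
	link, err := netlink.LinkByName("macvlan0")
	if err != nil {
		return err
	}
	// Equivalent to: ip link set macvlan0 type macvlan mode private
	return netlink.LinkSetMacvlanMode(link, netlink.MACVLAN_MODE_PRIVATE)
}

func exampleEnableBridgeVlanFiltering() error {
	br, err := netlink.LinkByName("br0")
	if err != nil {
		return err
	}
	// Equivalent to: ip link set br0 type bridge vlan_filtering 1
	return netlink.BridgeSetVlanFiltering(br, true)
}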
+// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos` +func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + Qos: uint32(qos), + } + nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + // LinkSetVfTxRate sets the tx rate of a vf for the link. // Equivalent to: `ip link set $link vf $vf rate $rate` func LinkSetVfTxRate(link Link, vf, rate int) error { @@ -495,13 +575,74 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { return err } +// LinkSetVfRate sets the min and max tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate` +func LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return pkgHandle.LinkSetVfRate(link, vf, minRate, maxRate) +} + +// LinkSetVfRate sets the min and max tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate` +func (h *Handle) LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfRate{ + Vf: uint32(vf), + MinTxRate: uint32(minRate), + MaxTxRate: uint32(maxRate), + } + info.AddRtAttr(nl.IFLA_VF_RATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfState enables/disables virtual link state on a vf. +// Equivalent to: `ip link set $link vf $vf state $state` +func LinkSetVfState(link Link, vf int, state uint32) error { + return pkgHandle.LinkSetVfState(link, vf, state) +} + +// LinkSetVfState enables/disables virtual link state on a vf. +// Equivalent to: `ip link set $link vf $vf state $state` +func (h *Handle) LinkSetVfState(link Link, vf int, state uint32) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfLinkState{ + Vf: uint32(vf), + LinkState: state, + } + info.AddRtAttr(nl.IFLA_VF_LINK_STATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + // LinkSetVfSpoofchk enables/disables spoof check on a vf for the link. // Equivalent to: `ip link set $link vf $vf spoofchk $check` func LinkSetVfSpoofchk(link Link, vf int, check bool) error { return pkgHandle.LinkSetVfSpoofchk(link, vf, check) } -// LinkSetVfSpookfchk enables/disables spoof check on a vf for the link. +// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link. 
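// Editor's illustrative sketch (not part of the vendored diff): configuring a VF
// with the new vlan/qos, rate and link-state setters declared in this hunk. The
// device name, VF index and rates are hypothetical; VF_LINK_STATE_ENABLE is the
// constant defined earlier in this file. Assumes import "github.com/vishvananda/netlink".
func exampleConfigureVf() error {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		return err
	}
	// ip link set eth0 vf 0 vlan 100 qos 4
	if err := netlink.LinkSetVfVlanQos(link, 0, 100, 4); err != nil {
		return err
	}
	// ip link set eth0 vf 0 min_tx_rate 100 max_tx_rate 1000
	if err := netlink.LinkSetVfRate(link, 0, 100, 1000); err != nil {
		return err
	}
	// ip link set eth0 vf 0 state enable
	return netlink.LinkSetVfState(link, 0, netlink.VF_LINK_STATE_ENABLE)
}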
// Equivalent to: `ip link set $link vf $vf spoofchk $check` func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { var setting uint32 @@ -581,7 +722,7 @@ func (h *Handle) LinkSetVfGUID(link Link, vf int, vfGuid net.HardwareAddr, guidT var guid uint64 buf := bytes.NewBuffer(vfGuid) - err = binary.Read(buf, binary.LittleEndian, &guid) + err = binary.Read(buf, binary.BigEndian, &guid) if err != nil { return err } @@ -609,13 +750,13 @@ func (h *Handle) LinkSetVfGUID(link Link, vf int, vfGuid net.HardwareAddr, guidT // LinkSetMaster sets the master of the link device. // Equivalent to: `ip link set $link master $master` -func LinkSetMaster(link Link, master *Bridge) error { +func LinkSetMaster(link Link, master Link) error { return pkgHandle.LinkSetMaster(link, master) } // LinkSetMaster sets the master of the link device. // Equivalent to: `ip link set $link master $master` -func (h *Handle) LinkSetMaster(link Link, master *Bridge) error { +func (h *Handle) LinkSetMaster(link Link, master Link) error { index := 0 if master != nil { masterBase := master.Attrs() @@ -948,6 +1089,10 @@ func (h *Handle) LinkAdd(link Link) error { return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) } +func (h *Handle) LinkModify(link Link) error { + return h.linkModify(link, unix.NLM_F_REQUEST|unix.NLM_F_ACK) +} + func (h *Handle) linkModify(link Link, flags int) error { // TODO: support extra data for macvlan base := link.Attrs() @@ -960,8 +1105,6 @@ func (h *Handle) linkModify(link Link, flags int) error { } if isTuntap { - // TODO: support user - // TODO: support group if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { return fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode) } @@ -989,21 +1132,64 @@ func (h *Handle) linkModify(link Link, flags int) error { } req.Flags |= uint16(tuntap.Mode) - + const TUN = "/dev/net/tun" for i := 0; i < queues; i++ { localReq := req - file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + fd, err := unix.Open(TUN, os.O_RDWR|syscall.O_CLOEXEC, 0) if err != nil { cleanupFds(fds) return err } - fds = append(fds, file) - _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) + _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) if errno != 0 { + // close the new fd + unix.Close(fd) + // and the already opened ones cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) } + + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETOWNER, uintptr(tuntap.Owner)) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETOWNER failed [%d], errno %v", i, errno) + } + + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETGROUP, uintptr(tuntap.Group)) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETGROUP failed [%d], errno %v", i, errno) + } + + // Set the tun device to non-blocking before use. The below comment + // taken from: + // + // https://github.com/mistsys/tuntap/commit/161418c25003bbee77d085a34af64d189df62bea + // + // Note there is a complication because in go, if a device node is + // opened, go sets it to use nonblocking I/O. However a /dev/net/tun + // doesn't work with epoll until after the TUNSETIFF ioctl has been + // done. 
So we open the unix fd directly, do the ioctl, then put the + // fd in nonblocking mode, an then finally wrap it in a os.File, + // which will see the nonblocking mode and add the fd to the + // pollable set, so later on when we Read() from it blocked the + // calling thread in the kernel. + // + // See + // https://github.com/golang/go/issues/30426 + // which got exposed in go 1.13 by the fix to + // https://github.com/golang/go/issues/30624 + err = unix.SetNonblock(fd, true) + if err != nil { + cleanupFds(fds) + return fmt.Errorf("Tuntap set to non-blocking failed [%d], err %v", i, err) + } + + // create the file from the file descriptor and store it + file := os.NewFile(uintptr(fd), TUN) + fds = append(fds, file) + // 1) we only care for the name of the first tap in the multi queue set // 2) if the original name was empty, the localReq has now the actual name // @@ -1014,6 +1200,7 @@ func (h *Handle) linkModify(link Link, flags int) error { if i == 0 { link.Attrs().Name = strings.Trim(string(localReq.Name[:]), "\x00") } + } // only persist interface if NonPersist is NOT set @@ -1086,13 +1273,18 @@ func (h *Handle) linkModify(link Link, flags int) error { native.PutUint32(b, uint32(base.ParentIndex)) data := nl.NewRtAttr(unix.IFLA_LINK, b) req.AddData(data) - } else if link.Type() == "ipvlan" { - return fmt.Errorf("Can't create ipvlan link without ParentIndex") + } else if link.Type() == "ipvlan" || link.Type() == "ipoib" { + return fmt.Errorf("Can't create %s link without ParentIndex", link.Type()) } nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) req.AddData(nameData) + if base.Alias != "" { + alias := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(base.Alias)) + req.AddData(alias) + } + if base.MTU > 0 { mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) req.AddData(mtu) @@ -1118,14 +1310,29 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(rxqueues) } + if base.GSOMaxSegs > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(base.GSOMaxSegs)) + req.AddData(gsoAttr) + } + + if base.GSOMaxSize > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(base.GSOMaxSize)) + req.AddData(gsoAttr) + } + + if base.Group > 0 { + groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group)) + req.AddData(groupAttr) + } + if base.Namespace != nil { var attr *nl.RtAttr - switch base.Namespace.(type) { + switch ns := base.Namespace.(type) { case NsPid: - val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) + val := nl.Uint32Attr(uint32(ns)) attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) case NsFd: - val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) + val := nl.Uint32Attr(uint32(ns)) attr = nl.NewRtAttr(unix.IFLA_NET_NS_FD, val) } @@ -1145,6 +1352,10 @@ func (h *Handle) linkModify(link Link, flags int) error { native.PutUint16(b, uint16(link.VlanId)) data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_VLAN_ID, b) + + if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { + data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) + } case *Veth: data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) @@ -1153,10 +1364,28 @@ func (h *Handle) linkModify(link Link, flags int) error { if base.TxQLen >= 0 { peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } + if base.NumTxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) + } + if base.NumRxQueues > 0 { + 
peer.AddRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) + } if base.MTU > 0 { peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } - + if link.PeerHardwareAddr != nil { + peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(link.PeerHardwareAddr)) + } + if link.PeerNamespace != nil { + switch ns := link.PeerNamespace.(type) { + case NsPid: + val := nl.Uint32Attr(uint32(ns)) + peer.AddRtAttr(unix.IFLA_NET_NS_PID, val) + case NsFd: + val := nl.Uint32Attr(uint32(ns)) + peer.AddRtAttr(unix.IFLA_NET_NS_FD, val) + } + } case *Vxlan: addVxlanAttrs(link, linkInfo) case *Bond: @@ -1164,6 +1393,7 @@ func (h *Handle) linkModify(link Link, flags int) error { case *IPVlan: data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) case *Macvlan: if link.Mode != MACVLAN_MODE_DEFAULT { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -1178,6 +1408,8 @@ func (h *Handle) linkModify(link Link, flags int) error { addGretapAttrs(link, linkInfo) case *Iptun: addIptunAttrs(link, linkInfo) + case *Ip6tnl: + addIp6tnlAttrs(link, linkInfo) case *Sittun: addSittunAttrs(link, linkInfo) case *Gretun: @@ -1190,6 +1422,10 @@ func (h *Handle) linkModify(link Link, flags int) error { addBridgeAttrs(link, linkInfo) case *GTP: addGTPAttrs(link, linkInfo) + case *Xfrmi: + addXfrmiAttrs(link, linkInfo) + case *IPoIB: + addIPoIBAttrs(link, linkInfo) } req.AddData(linkInfo) @@ -1381,15 +1617,21 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } - base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()} + base := NewLinkAttrs() + base.Index = int(msg.Index) + base.RawFlags = msg.Flags + base.Flags = linkFlags(msg.Flags) + base.EncapType = msg.EncapType() if msg.Flags&unix.IFF_PROMISC != 0 { base.Promisc = 1 } var ( - link Link - stats32 []byte - stats64 []byte - linkType string + link Link + stats32 *LinkStatistics32 + stats64 *LinkStatistics64 + linkType string + linkSlave LinkSlave + slaveType string ) for _, attr := range attrs { switch attr.Attr.Type { @@ -1413,6 +1655,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Vlan{} case "veth": link = &Veth{} + case "wireguard": + link = &Wireguard{} case "vxlan": link = &Vxlan{} case "bond": @@ -1429,6 +1673,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Gretap{} case "ipip": link = &Iptun{} + case "ip6tnl": + link = &Ip6tnl{} case "sit": link = &Sittun{} case "gre": @@ -1441,6 +1687,12 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Vrf{} case "gtp": link = >P{} + case "xfrm": + link = &Xfrmi{} + case "tun": + link = &Tuntap{} + case "ipoib": + link = &IPoIB{} default: link = &GenericLink{LinkType: linkType} } @@ -1468,6 +1720,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseGretapData(link, data) case "ipip": parseIptunData(link, data) + case "ip6tnl": + parseIp6tnlData(link, data) case "sit": parseSittunData(link, data) case "gre": @@ -1482,6 +1736,27 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseBridgeData(link, data) case "gtp": parseGTPData(link, data) + case "xfrm": + parseXfrmiData(link, data) + case "tun": + parseTuntapData(link, data) + case "ipoib": + parseIPoIBData(link, data) + } + case nl.IFLA_INFO_SLAVE_KIND: + slaveType = 
string(info.Value[:len(info.Value)-1]) + switch slaveType { + case "bond": + linkSlave = &BondSlave{} + } + case nl.IFLA_INFO_SLAVE_DATA: + switch slaveType { + case "bond": + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + parseBondSlaveData(linkSlave, data) } } } @@ -1508,9 +1783,15 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { case unix.IFLA_IFALIAS: base.Alias = string(attr.Value[:len(attr.Value)-1]) case unix.IFLA_STATS: - stats32 = attr.Value[:] + stats32 = new(LinkStatistics32) + if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats32); err != nil { + return nil, err + } case unix.IFLA_STATS64: - stats64 = attr.Value[:] + stats64 = new(LinkStatistics64) + if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats64); err != nil { + return nil, err + } case unix.IFLA_XDP: xdp, err := parseLinkXdp(attr.Value[:]) if err != nil { @@ -1531,6 +1812,10 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.OperState = LinkOperState(uint8(attr.Value[0])) case unix.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_GSO_MAX_SIZE: + base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_MAX_SEGS: + base.GSOMaxSegs = native.Uint32(attr.Value[0:4]) case unix.IFLA_VFINFO_LIST: data, err := nl.ParseRouteAttr(attr.Value) if err != nil { @@ -1541,13 +1826,19 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } base.Vfs = vfs + case unix.IFLA_NUM_TX_QUEUES: + base.NumTxQueues = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_NUM_RX_QUEUES: + base.NumRxQueues = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_GROUP: + base.Group = native.Uint32(attr.Value[0:4]) } } if stats64 != nil { - base.Statistics = parseLinkStats64(stats64) + base.Statistics = (*LinkStatistics)(stats64) } else if stats32 != nil { - base.Statistics = parseLinkStats32(stats32) + base.Statistics = (*LinkStatistics)(stats32.to64()) } // Links that don't have IFLA_INFO_KIND are hardware devices @@ -1555,10 +1846,59 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Device{} } *link.Attrs() = base + link.Attrs().Slave = linkSlave + + // If the tuntap attributes are not updated by netlink due to + // an older driver, use sysfs + if link != nil && linkType == "tun" { + tuntap := link.(*Tuntap) + + if tuntap.Mode == 0 { + ifname := tuntap.Attrs().Name + if flags, err := readSysPropAsInt64(ifname, "tun_flags"); err == nil { + + if flags&unix.IFF_TUN != 0 { + tuntap.Mode = unix.IFF_TUN + } else if flags&unix.IFF_TAP != 0 { + tuntap.Mode = unix.IFF_TAP + } + + tuntap.NonPersist = false + if flags&unix.IFF_PERSIST == 0 { + tuntap.NonPersist = true + } + } + + // The sysfs interface for owner/group returns -1 for root user, instead of returning 0. + // So explicitly check for negative value, before assigning the owner uid/gid. 
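// Editor's illustrative sketch (not part of the vendored diff): IFLA_STATS(64) is
// now decoded with binary.Read and the new GSO/group attributes are stored on
// LinkAttrs, so counters and offload limits can be read straight from a lookup.
// "eth0" is hypothetical; assumes imports "fmt" and "github.com/vishvananda/netlink".
func examplePrintLinkCounters() error {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		return err
	}
	attrs := link.Attrs()
	if attrs.Statistics != nil {
		fmt.Printf("rx_bytes=%d tx_bytes=%d rx_dropped=%d\n",
			attrs.Statistics.RxBytes, attrs.Statistics.TxBytes, attrs.Statistics.RxDropped)
	}
	fmt.Printf("gso_max_size=%d group=%d\n", attrs.GSOMaxSize, attrs.Group)
	return nil
}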
+ if owner, err := readSysPropAsInt64(ifname, "owner"); err == nil && owner > 0 { + tuntap.Owner = uint32(owner) + } + + if group, err := readSysPropAsInt64(ifname, "group"); err == nil && group > 0 { + tuntap.Group = uint32(group) + } + } + } return link, nil } +func readSysPropAsInt64(ifname, prop string) (int64, error) { + fname := fmt.Sprintf("/sys/class/net/%s/%s", ifname, prop) + contents, err := ioutil.ReadFile(fname) + if err != nil { + return 0, err + } + + num, err := strconv.ParseInt(strings.TrimSpace(string(contents)), 0, 64) + if err == nil { + return num, nil + } + + return 0, err +} + // LinkList gets a list of link devices. // Equivalent to: `ip link show` func LinkList() ([]Link, error) { @@ -1655,13 +1995,19 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { continue @@ -1804,12 +2150,43 @@ func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { return err } +// LinkSetGroup sets the link group id which can be used to perform mass actions +// with iproute2 as well use it as a reference in nft filters. +// Equivalent to: `ip link set $link group $id` +func LinkSetGroup(link Link, group int) error { + return pkgHandle.LinkSetGroup(link, group) +} + +// LinkSetGroup sets the link group id which can be used to perform mass actions +// with iproute2 as well use it as a reference in nft filters. +// Equivalent to: `ip link set $link group $id` +func (h *Handle) LinkSetGroup(link Link, group int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(group)) + + data := nl.NewRtAttr(unix.IFLA_GROUP, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { switch datum.Attr.Type { case nl.IFLA_VLAN_ID: vlan.VlanId = int(native.Uint16(datum.Value[0:2])) + case nl.IFLA_VLAN_PROTOCOL: + vlan.VlanProtocol = VlanProtocol(int(ntohs(datum.Value[0:2]))) } } } @@ -1817,6 +2194,13 @@ func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { vxlan := link.(*Vxlan) for _, datum := range data { + // NOTE(vish): Apparently some messages can be sent with no value. + // We special case GBP here to not change existing + // functionality. It appears that GBP sends a datum.Value + // of null. 
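// Editor's illustrative sketch (not part of the vendored diff): the new LinkSetGroup
// helper and the IFLA_VLAN_PROTOCOL parsing added in this hunk. The interface name
// and group id are hypothetical; assumes imports "fmt" and
// "github.com/vishvananda/netlink".
func exampleGroupAndVlanProtocol() error {
	link, err := netlink.LinkByName("eth0.100")
	if err != nil {
		return err
	}
	// Equivalent to: ip link set eth0.100 group 42
	if err := netlink.LinkSetGroup(link, 42); err != nil {
		return err
	}
	if vlan, ok := link.(*netlink.Vlan); ok {
		fmt.Println("vlan protocol:", vlan.VlanProtocol) // prints "802.1q" or "802.1ad"
	}
	return nil
}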
+ if len(datum.Value) == 0 && datum.Attr.Type != nl.IFLA_VXLAN_GBP { + continue + } switch datum.Attr.Type { case nl.IFLA_VXLAN_ID: vxlan.VxlanId = int(native.Uint32(datum.Value[0:4])) @@ -1891,7 +2275,7 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BOND_ARP_INTERVAL: bond.ArpInterval = int(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_ARP_IP_TARGET: - // TODO: implement + bond.ArpIpTargets = parseBondArpIpTargets(data[i].Value) case nl.IFLA_BOND_ARP_VALIDATE: bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_ARP_ALL_TARGETS: @@ -1934,12 +2318,75 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseBondArpIpTargets(value []byte) []net.IP { + data, err := nl.ParseRouteAttr(value) + if err != nil { + return nil + } + + targets := []net.IP{} + for i := range data { + target := net.IP(data[i].Value) + if ip := target.To4(); ip != nil { + targets = append(targets, ip) + continue + } + if ip := target.To16(); ip != nil { + targets = append(targets, ip) + } + } + + return targets +} + +func addBondSlaveAttrs(bondSlave *BondSlave, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil) + + data.AddRtAttr(nl.IFLA_BOND_SLAVE_STATE, nl.Uint8Attr(uint8(bondSlave.State))) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_MII_STATUS, nl.Uint8Attr(uint8(bondSlave.MiiStatus))) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, nl.Uint32Attr(bondSlave.LinkFailureCount)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(bondSlave.QueueId)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, nl.Uint16Attr(bondSlave.AggregatorId)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, nl.Uint8Attr(bondSlave.AdActorOperPortState)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, nl.Uint16Attr(bondSlave.AdPartnerOperPortState)) + + if mac := bondSlave.PermHardwareAddr; mac != nil { + data.AddRtAttr(nl.IFLA_BOND_SLAVE_PERM_HWADDR, []byte(mac)) + } +} + +func parseBondSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { + bondSlave := slave.(*BondSlave) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_SLAVE_STATE: + bondSlave.State = BondSlaveState(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_MII_STATUS: + bondSlave.MiiStatus = BondSlaveMiiStatus(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT: + bondSlave.LinkFailureCount = native.Uint32(data[i].Value[0:4]) + case nl.IFLA_BOND_SLAVE_PERM_HWADDR: + bondSlave.PermHardwareAddr = net.HardwareAddr(data[i].Value[0:6]) + case nl.IFLA_BOND_SLAVE_QUEUE_ID: + bondSlave.QueueId = native.Uint16(data[i].Value[0:2]) + case nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID: + bondSlave.AggregatorId = native.Uint16(data[i].Value[0:2]) + case nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE: + bondSlave.AdActorOperPortState = uint8(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE: + bondSlave.AdPartnerOperPortState = native.Uint16(data[i].Value[0:2]) + } + } +} + func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { ipv := link.(*IPVlan) for _, datum := range data { - if datum.Attr.Type == nl.IFLA_IPVLAN_MODE { + switch datum.Attr.Type { + case nl.IFLA_IPVLAN_MODE: ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) - return + case nl.IFLA_IPVLAN_FLAG: + ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4])) } } } @@ -2081,9 +2528,7 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_ENCAP_FLAGS: gre.EncapFlags = 
native.Uint16(datum.Value[0:2]) case nl.IFLA_GRE_COLLECT_METADATA: - if len(datum.Value) > 0 { - gre.FlowBased = int8(datum.Value[0]) != 0 - } + gre.FlowBased = true } } } @@ -2165,14 +2610,6 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { } } -func parseLinkStats32(data []byte) *LinkStatistics { - return (*LinkStatistics)((*LinkStatistics32)(unsafe.Pointer(&data[0:SizeofLinkStats32][0])).to64()) -} - -func parseLinkStats64(data []byte) *LinkStatistics { - return (*LinkStatistics)((*LinkStatistics64)(unsafe.Pointer(&data[0:SizeofLinkStats64][0]))) -} - func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { attrs := nl.NewRtAttr(unix.IFLA_XDP|unix.NLA_F_NESTED, nil) b := make([]byte, 4) @@ -2197,7 +2634,8 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { case nl.IFLA_XDP_FD: xdp.Fd = int(native.Uint32(attr.Value[0:4])) case nl.IFLA_XDP_ATTACHED: - xdp.Attached = attr.Value[0] != 0 + xdp.AttachMode = uint32(attr.Value[0]) + xdp.Attached = xdp.AttachMode != 0 case nl.IFLA_XDP_FLAGS: xdp.Flags = native.Uint32(attr.Value[0:4]) case nl.IFLA_XDP_PROG_ID: @@ -2261,7 +2699,68 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_IPTUN_ENCAP_FLAGS: iptun.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_IPTUN_COLLECT_METADATA: - iptun.FlowBased = int8(datum.Value[0]) != 0 + iptun.FlowBased = true + } + } +} + +func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if ip6tnl.Link != 0 { + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link)) + } + + ip := ip6tnl.Local.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = ip6tnl.Remote.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip)) + } + + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(ip6tnl.Ttl)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(ip6tnl.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_FLAGS, nl.Uint32Attr(ip6tnl.Flags)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(ip6tnl.Proto)) + data.AddRtAttr(nl.IFLA_IPTUN_FLOWINFO, nl.Uint32Attr(ip6tnl.FlowInfo)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(ip6tnl.EncapLimit)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(ip6tnl.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(ip6tnl.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(ip6tnl.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(ip6tnl.EncapDport)) +} + +func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) { + ip6tnl := link.(*Ip6tnl) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + ip6tnl.Local = net.IP(datum.Value[:16]) + case nl.IFLA_IPTUN_REMOTE: + ip6tnl.Remote = net.IP(datum.Value[:16]) + case nl.IFLA_IPTUN_TTL: + ip6tnl.Ttl = datum.Value[0] + case nl.IFLA_IPTUN_TOS: + ip6tnl.Tos = datum.Value[0] + case nl.IFLA_IPTUN_FLAGS: + ip6tnl.Flags = native.Uint32(datum.Value[:4]) + case nl.IFLA_IPTUN_PROTO: + ip6tnl.Proto = datum.Value[0] + case nl.IFLA_IPTUN_FLOWINFO: + ip6tnl.FlowInfo = native.Uint32(datum.Value[:4]) + case nl.IFLA_IPTUN_ENCAP_LIMIT: + ip6tnl.EncapLimit = datum.Value[0] + case nl.IFLA_IPTUN_ENCAP_TYPE: + ip6tnl.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + ip6tnl.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + ip6tnl.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + ip6tnl.EncapDport = ntohs(datum.Value[0:2]) } } } @@ -2288,8 
+2787,10 @@ func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) } + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(sittun.Proto)) data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(sittun.EncapLimit)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) @@ -2305,11 +2806,13 @@ func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_IPTUN_REMOTE: sittun.Remote = net.IP(datum.Value[0:4]) case nl.IFLA_IPTUN_TTL: - sittun.Ttl = uint8(datum.Value[0]) + sittun.Ttl = datum.Value[0] case nl.IFLA_IPTUN_TOS: - sittun.Tos = uint8(datum.Value[0]) + sittun.Tos = datum.Value[0] case nl.IFLA_IPTUN_PMTUDISC: - sittun.PMtuDisc = uint8(datum.Value[0]) + sittun.PMtuDisc = datum.Value[0] + case nl.IFLA_IPTUN_PROTO: + sittun.Proto = datum.Value[0] case nl.IFLA_IPTUN_ENCAP_TYPE: sittun.EncapType = native.Uint16(datum.Value[0:2]) case nl.IFLA_IPTUN_ENCAP_FLAGS: @@ -2396,6 +2899,9 @@ func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { if bridge.MulticastSnooping != nil { data.AddRtAttr(nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping)) } + if bridge.AgeingTime != nil { + data.AddRtAttr(nl.IFLA_BR_AGEING_TIME, nl.Uint32Attr(*bridge.AgeingTime)) + } if bridge.HelloTime != nil { data.AddRtAttr(nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime)) } @@ -2408,6 +2914,9 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { br := bridge.(*Bridge) for _, datum := range data { switch datum.Attr.Type { + case nl.IFLA_BR_AGEING_TIME: + ageingTime := native.Uint32(datum.Value[0:4]) + br.AgeingTime = &ageingTime case nl.IFLA_BR_HELLO_TIME: helloTime := native.Uint32(datum.Value[0:4]) br.HelloTime = &helloTime @@ -2483,11 +2992,52 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { case nl.IFLA_VF_LINK_STATE: ls := nl.DeserializeVfLinkState(element.Value[:]) vf.LinkState = ls.LinkState + case nl.IFLA_VF_RATE: + vfr := nl.DeserializeVfRate(element.Value[:]) + vf.MaxTxRate = vfr.MaxTxRate + vf.MinTxRate = vfr.MinTxRate + case nl.IFLA_VF_STATS: + vfstats := nl.DeserializeVfStats(element.Value[:]) + vf.RxPackets = vfstats.RxPackets + vf.TxPackets = vfstats.TxPackets + vf.RxBytes = vfstats.RxBytes + vf.TxBytes = vfstats.TxBytes + vf.Multicast = vfstats.Multicast + vf.Broadcast = vfstats.Broadcast + vf.RxDropped = vfstats.RxDropped + vf.TxDropped = vfstats.TxDropped + + case nl.IFLA_VF_RSS_QUERY_EN: + result := nl.DeserializeVfRssQueryEn(element.Value) + vf.RssQuery = result.Setting + + case nl.IFLA_VF_TRUST: + result := nl.DeserializeVfTrust(element.Value) + vf.Trust = result.Setting } } return vf } +func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex))) + data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) + +} + +func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) { + xfrmi := link.(*Xfrmi) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_XFRM_LINK: + xfrmi.ParentIndex = int(native.Uint32(datum.Value)) + case nl.IFLA_XFRM_IF_ID: + xfrmi.Ifid = native.Uint32(datum.Value) + } + } +} + // LinkSetBondSlave add 
slave to bond link via ioctl interface. func LinkSetBondSlave(link Link, master *Bond) error { fd, err := getSocketUDP() @@ -2505,6 +3055,52 @@ func LinkSetBondSlave(link Link, master *Bond) error { return nil } +// LinkSetBondSlaveQueueId modify bond slave queue-id. +func (h *Handle) LinkSetBondSlaveQueueId(link Link, queueId uint16) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(queueId)) + + req.AddData(linkInfo) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetBondSlaveQueueId modify bond slave queue-id. +func LinkSetBondSlaveQueueId(link Link, queueId uint16) error { + return pkgHandle.LinkSetBondSlaveQueueId(link, queueId) +} + +func vethStatsSerialize(stats ethtoolStats) ([]byte, error) { + statsSize := int(unsafe.Sizeof(stats)) + int(stats.nStats)*int(unsafe.Sizeof(uint64(0))) + b := make([]byte, 0, statsSize) + buf := bytes.NewBuffer(b) + err := binary.Write(buf, nl.NativeEndian(), stats) + return buf.Bytes()[:statsSize], err +} + +type vethEthtoolStats struct { + Cmd uint32 + NStats uint32 + Peer uint64 + // Newer kernels have XDP stats in here, but we only care + // to extract the peer ifindex here. +} + +func vethStatsDeserialize(b []byte) (vethEthtoolStats, error) { + var stats = vethEthtoolStats{} + err := binary.Read(bytes.NewReader(b), nl.NativeEndian(), &stats) + return stats, err +} + // VethPeerIndex get veth peer index. func VethPeerIndex(link *Veth) (int, error) { fd, err := getSocketUDP() @@ -2519,25 +3115,66 @@ func VethPeerIndex(link *Veth) (int, error) { return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) } - gstrings := ðtoolGstrings{ - cmd: ETHTOOL_GSTRINGS, - stringSet: ETH_SS_STATS, - length: sSet.data[0], + stats := ethtoolStats{ + cmd: ETHTOOL_GSTATS, + nStats: sSet.data[0], } - ifreq.Data = uintptr(unsafe.Pointer(gstrings)) + + buffer, err := vethStatsSerialize(stats) + if err != nil { + return -1, err + } + + ifreq.Data = uintptr(unsafe.Pointer(&buffer[0])) _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) if errno != 0 { return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) } - stats := ðtoolStats{ - cmd: ETHTOOL_GSTATS, - nStats: gstrings.length, + vstats, err := vethStatsDeserialize(buffer) + if err != nil { + return -1, err } - ifreq.Data = uintptr(unsafe.Pointer(stats)) - _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) - if errno != 0 { - return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + + return int(vstats.Peer), nil +} + +func parseTuntapData(link Link, data []syscall.NetlinkRouteAttr) { + tuntap := link.(*Tuntap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_TUN_OWNER: + tuntap.Owner = native.Uint32(datum.Value) + case nl.IFLA_TUN_GROUP: + tuntap.Group = native.Uint32(datum.Value) + case nl.IFLA_TUN_TYPE: + tuntap.Mode = TuntapMode(uint8(datum.Value[0])) + case nl.IFLA_TUN_PERSIST: + tuntap.NonPersist = false + if uint8(datum.Value[0]) == 0 { + tuntap.NonPersist = true + } + } } - return int(stats.data[0]), nil +} + +func 
parseIPoIBData(link Link, data []syscall.NetlinkRouteAttr) { + ipoib := link.(*IPoIB) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPOIB_PKEY: + ipoib.Pkey = uint16(native.Uint16(datum.Value)) + case nl.IFLA_IPOIB_MODE: + ipoib.Mode = IPoIBMode(native.Uint16(datum.Value)) + case nl.IFLA_IPOIB_UMCAST: + ipoib.Umcast = uint16(native.Uint16(datum.Value)) + } + } +} + +func addIPoIBAttrs(ipoib *IPoIB, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPOIB_PKEY, nl.Uint16Attr(uint16(ipoib.Pkey))) + data.AddRtAttr(nl.IFLA_IPOIB_MODE, nl.Uint16Attr(uint16(ipoib.Mode))) + data.AddRtAttr(nl.IFLA_IPOIB_UMCAST, nl.Uint16Attr(uint16(ipoib.Umcast))) } diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go index 7babd12c1..379e5655f 100644 --- a/vendor/github.com/vishvananda/netlink/neigh.go +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -17,6 +17,7 @@ type Neigh struct { LLIPAddr net.IP //Used in the case of NHRP Vlan int VNI int + MasterIndex int } // String returns $ip/$hwaddr $label diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index 84e66c9ea..fb220d141 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "fmt" "net" "syscall" "unsafe" @@ -20,7 +21,10 @@ const ( NDA_PORT NDA_VNI NDA_IFINDEX - NDA_MAX = NDA_IFINDEX + NDA_MASTER + NDA_LINK_NETNSID + NDA_SRC_VNI + NDA_MAX = NDA_SRC_VNI ) // Neighbor Cache Entry States. @@ -45,6 +49,7 @@ const ( NTF_ROUTER = 0x80 ) +// Ndmsg is for adding, removing or receiving information about a neighbor table entry type Ndmsg struct { Family uint8 Index uint32 @@ -172,45 +177,58 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { req.AddData(vniData) } + if neigh.MasterIndex != 0 { + masterData := nl.NewRtAttr(NDA_MASTER, nl.Uint32Attr(uint32(neigh.MasterIndex))) + req.AddData(masterData) + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } -// NeighList gets a list of IP-MAC mappings in the system (ARP table). +// NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. func NeighList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighList(linkIndex, family) } -// NeighProxyList gets a list of neighbor proxies in the system. +// NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link and ip family. func NeighProxyList(linkIndex, family int) ([]Neigh, error) { return pkgHandle.NeighProxyList(linkIndex, family) } -// NeighList gets a list of IP-MAC mappings in the system (ARP table). +// NeighList returns a list of IP-MAC mappings in the system (ARP table). // Equivalent to: `ip neighbor show`. // The list can be filtered by link and ip family. func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { - return h.neighList(linkIndex, family, 0) + return h.NeighListExecute(Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + }) } -// NeighProxyList gets a list of neighbor proxies in the system. +// NeighProxyList returns a list of neighbor proxies in the system. // Equivalent to: `ip neighbor show proxy`. // The list can be filtered by link, ip family. 
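// Editor's illustrative sketch (not part of the vendored diff): NDA_MASTER is now
// parsed into Neigh.MasterIndex, and NeighListExecute (declared just below) exposes
// the raw Ndmsg so dumps can be filtered by state as well as by link and family.
// Assumes imports "fmt", "golang.org/x/sys/unix" and "github.com/vishvananda/netlink".
func exampleNeighbourDumps(port netlink.Link) error {
	// Bridge fdb dump; each entry now carries the index of its master device.
	fdb, err := netlink.NeighList(port.Attrs().Index, unix.AF_BRIDGE)
	if err != nil {
		return err
	}
	for _, n := range fdb {
		fmt.Printf("%s master=%d\n", n.HardwareAddr, n.MasterIndex)
	}

	// Only REACHABLE IPv4 entries on this link.
	reachable, err := netlink.NeighListExecute(netlink.Ndmsg{
		Family: unix.AF_INET,
		Index:  uint32(port.Attrs().Index),
		State:  netlink.NUD_REACHABLE,
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d reachable IPv4 neighbours\n", len(reachable))
	return nil
}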
func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { - return h.neighList(linkIndex, family, NTF_PROXY) + return h.NeighListExecute(Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + Flags: NTF_PROXY, + }) +} + +// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +func NeighListExecute(msg Ndmsg) ([]Neigh, error) { + return pkgHandle.NeighListExecute(msg) } -func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { +// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) - msg := Ndmsg{ - Family: uint8(family), - Index: uint32(linkIndex), - Flags: uint8(flags), - } req.AddData(&msg) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) @@ -221,10 +239,22 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { var res []Neigh for _, m := range msgs { ndm := deserializeNdmsg(m) - if linkIndex != 0 && int(ndm.Index) != linkIndex { + if msg.Index != 0 && ndm.Index != msg.Index { // Ignore messages from other interfaces continue } + if msg.Family != 0 && ndm.Family != msg.Family { + continue + } + if msg.State != 0 && ndm.State != msg.State { + continue + } + if msg.Type != 0 && ndm.Type != msg.Type { + continue + } + if msg.Flags != 0 && ndm.Flags != msg.Flags { + continue + } neigh, err := NeighDeserialize(m) if err != nil { @@ -253,14 +283,6 @@ func NeighDeserialize(m []byte) (*Neigh, error) { return nil, err } - // This should be cached for perfomance - // once per table dump - link, err := LinkByIndex(neigh.LinkIndex) - if err != nil { - return nil, err - } - encapType := link.Attrs().EncapType - for _, attr := range attrs { switch attr.Attr.Type { case NDA_DST: @@ -270,13 +292,16 @@ func NeighDeserialize(m []byte) (*Neigh, error) { // #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) attrLen := attr.Attr.Len - unix.SizeofRtAttr - if attrLen == 4 && (encapType == "ipip" || - encapType == "sit" || - encapType == "gre") { + if attrLen == 4 { neigh.LLIPAddr = net.IP(attr.Value) - } else if attrLen == 16 && - encapType == "tunnel6" { - neigh.IP = net.IP(attr.Value) + } else if attrLen == 16 { + // Can be IPv6 or FireWire HWAddr + link, err := LinkByIndex(neigh.LinkIndex) + if err == nil && link.Attrs().EncapType == "tunnel6" { + neigh.IP = net.IP(attr.Value) + } else { + neigh.HardwareAddr = net.HardwareAddr(attr.Value) + } } else { neigh.HardwareAddr = net.HardwareAddr(attr.Value) } @@ -284,6 +309,8 @@ func NeighDeserialize(m []byte) (*Neigh, error) { neigh.Vlan = int(native.Uint16(attr.Value[0:2])) case NDA_VNI: neigh.VNI = int(native.Uint32(attr.Value[0:4])) + case NDA_MASTER: + neigh.MasterIndex = int(native.Uint32(attr.Value[0:4])) } } @@ -323,6 +350,16 @@ func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, opti func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH) + makeRequest := func(family int) error { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, + unix.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(family) + req.AddData(infmsg) + if err := s.Send(req); err != nil { + return err + } + return nil + } if err != 
nil { return err } @@ -333,26 +370,41 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < }() } if listExisting { - req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, - unix.NLM_F_DUMP) - infmsg := nl.NewIfInfomsg(unix.AF_UNSPEC) - req.AddData(infmsg) - if err := s.Send(req); err != nil { + if err := makeRequest(unix.AF_UNSPEC); err != nil { return err } + // We have to wait for NLMSG_DONE before making AF_BRIDGE request } go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { + if listExisting { + // This will be called after handling AF_UNSPEC + // list request, we have to wait for NLMSG_DONE + // before making another request + if err := makeRequest(unix.AF_BRIDGE); err != nil { + if cberr != nil { + cberr(err) + } + return + } + listExisting = false + } continue } if m.Header.Type == unix.NLMSG_ERROR { diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index 86111b92c..71436f25c 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -16,7 +16,7 @@ func LinkSetMTU(link Link, mtu int) error { return ErrNotImplemented } -func LinkSetMaster(link Link, master *Bridge) error { +func LinkSetMaster(link Link, master Link) error { return ErrNotImplemented } @@ -48,10 +48,18 @@ func LinkSetVfVlan(link Link, vf, vlan int) error { return ErrNotImplemented } +func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return ErrNotImplemented +} + func LinkSetVfTxRate(link Link, vf, rate int) error { return ErrNotImplemented } +func LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return ErrNotImplemented +} + func LinkSetNoMaster(link Link) error { return ErrNotImplemented } @@ -64,6 +72,10 @@ func LinkSetXdpFd(link Link, fd int) error { return ErrNotImplemented } +func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { + return ErrNotImplemented +} + func LinkSetARPOff(link Link) error { return ErrNotImplemented } @@ -152,6 +164,10 @@ func AddrAdd(link Link, addr *Addr) error { return ErrNotImplemented } +func AddrReplace(link Link, addr *Addr) error { + return ErrNotImplemented +} + func AddrDel(link Link, addr *Addr) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/netns_linux.go b/vendor/github.com/vishvananda/netlink/netns_linux.go index 9a0d662af..77cf6f469 100644 --- a/vendor/github.com/vishvananda/netlink/netns_linux.go +++ b/vendor/github.com/vishvananda/netlink/netns_linux.go @@ -51,14 +51,14 @@ func SetNetNsIdByPid(pid, nsid int) error { return pkgHandle.SetNetNsIdByPid(pid, nsid) } -// GetNetNsIdByPid looks up the network namespace ID for a given fd. +// GetNetNsIdByFd looks up the network namespace ID for a given fd. // fd must be an open file descriptor to a namespace file. // Returns -1 if the namespace does not have an ID set. func (h *Handle) GetNetNsIdByFd(fd int) (int, error) { return h.getNetNsId(NETNSA_FD, uint32(fd)) } -// GetNetNsIdByPid looks up the network namespace ID for a given fd. +// GetNetNsIdByFd looks up the network namespace ID for a given fd. 
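// Editor's illustrative sketch (not part of the vendored diff): a consumer of
// NeighSubscribe. With this change the subscription loop rejects messages whose
// sender is not the kernel, and when existing entries are requested the dump is
// repeated for AF_BRIDGE so fdb entries are delivered as well. Assumes imports
// "fmt" and "github.com/vishvananda/netlink".
func exampleWatchNeighbourUpdates() error {
	updates := make(chan netlink.NeighUpdate)
	done := make(chan struct{})
	defer close(done)

	if err := netlink.NeighSubscribe(updates, done); err != nil {
		return err
	}
	for u := range updates {
		// NeighUpdate embeds Neigh, so the entry fields are available directly.
		fmt.Printf("nlmsg type=%d ip=%s lladdr=%s\n", u.Type, u.IP, u.HardwareAddr)
	}
	return nil
}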
// fd must be an open file descriptor to a namespace file. // Returns -1 if the namespace does not have an ID set. func GetNetNsIdByFd(fd int) (int, error) { diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go index 50db3b4cd..6bea4ed02 100644 --- a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -54,24 +54,18 @@ func (msg *IfAddrmsg) Len() int { // __u32 tstamp; /* updated timestamp, hundredths of seconds */ // }; -const IFA_CACHEINFO = 6 -const SizeofIfaCacheInfo = 0x10 - type IfaCacheInfo struct { - IfaPrefered uint32 - IfaValid uint32 - Cstamp uint32 - Tstamp uint32 + unix.IfaCacheinfo } func (msg *IfaCacheInfo) Len() int { - return SizeofIfaCacheInfo + return unix.SizeofIfaCacheinfo } func DeserializeIfaCacheInfo(b []byte) *IfaCacheInfo { - return (*IfaCacheInfo)(unsafe.Pointer(&b[0:SizeofIfaCacheInfo][0])) + return (*IfaCacheInfo)(unsafe.Pointer(&b[0:unix.SizeofIfaCacheinfo][0])) } func (msg *IfaCacheInfo) Serialize() []byte { - return (*(*[SizeofIfaCacheInfo]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofIfaCacheinfo]byte)(unsafe.Pointer(msg)))[:] } diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go index 56cb3544b..14924027e 100644 --- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -40,9 +40,10 @@ const ( NFNETLINK_V0 = 0 ) -// #define NLA_F_NESTED (1 << 15) const ( - NLA_F_NESTED = (1 << 15) + NLA_F_NESTED uint16 = (1 << 15) // #define NLA_F_NESTED (1 << 15) + NLA_F_NET_BYTEORDER uint16 = (1 << 14) // #define NLA_F_NESTED (1 << 14) + NLA_TYPE_MASK = ^(NLA_F_NESTED | NLA_F_NET_BYTEORDER) ) // enum ctattr_type { @@ -79,11 +80,14 @@ const ( CTA_TUPLE_ORIG = 1 CTA_TUPLE_REPLY = 2 CTA_STATUS = 3 + CTA_PROTOINFO = 4 CTA_TIMEOUT = 7 CTA_MARK = 8 CTA_COUNTERS_ORIG = 9 CTA_COUNTERS_REPLY = 10 - CTA_PROTOINFO = 4 + CTA_USE = 11 + CTA_ID = 12 + CTA_TIMESTAMP = 20 ) // enum ctattr_tuple { @@ -180,6 +184,14 @@ const ( CTA_COUNTERS_BYTES = 2 ) +// enum CTA TIMESTAMP TLVs +// CTA_TIMESTAMP_START /* 64bit value */ +// CTA_TIMESTAMP_STOP /* 64bit value */ +const ( + CTA_TIMESTAMP_START = 1 + CTA_TIMESTAMP_STOP = 2 +) + // /* General form of address family dependent message. 
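// Editor's illustrative sketch (not part of the vendored diff): NLA_F_NESTED and
// NLA_F_NET_BYTEORDER are flag bits carried in an attribute's type field; the new
// NLA_TYPE_MASK strips them so the real attribute id can be switched on (nested
// conntrack attributes, for example, arrive with NLA_F_NESTED set). Assumes imports
// "syscall" and "github.com/vishvananda/netlink/nl".
func attrType(attr syscall.NetlinkRouteAttr) uint16 {
	return attr.Attr.Type & nl.NLA_TYPE_MASK
}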
// */ // struct nfgenmsg { diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go new file mode 100644 index 000000000..db66faaad --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go @@ -0,0 +1,40 @@ +package nl + +// All the following constants are coming from: +// https://github.com/torvalds/linux/blob/master/include/uapi/linux/devlink.h + +const ( + GENL_DEVLINK_VERSION = 1 + GENL_DEVLINK_NAME = "devlink" +) + +const ( + DEVLINK_CMD_GET = 1 + DEVLINK_CMD_ESWITCH_GET = 29 + DEVLINK_CMD_ESWITCH_SET = 30 +) + +const ( + DEVLINK_ATTR_BUS_NAME = 1 + DEVLINK_ATTR_DEV_NAME = 2 + DEVLINK_ATTR_ESWITCH_MODE = 25 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 +) + +const ( + DEVLINK_ESWITCH_MODE_LEGACY = 0 + DEVLINK_ESWITCH_MODE_SWITCHDEV = 1 +) + +const ( + DEVLINK_ESWITCH_INLINE_MODE_NONE = 0 + DEVLINK_ESWITCH_INLINE_MODE_LINK = 1 + DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 2 + DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 3 +) + +const ( + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0 + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go new file mode 100644 index 000000000..a60b4b09d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go @@ -0,0 +1,222 @@ +package nl + +import ( + "strconv" + + "golang.org/x/sys/unix" +) + +const ( + /* The protocol version */ + IPSET_PROTOCOL = 6 + + /* The max length of strings including NUL: set and type identifiers */ + IPSET_MAXNAMELEN = 32 + + /* The maximum permissible comment length we will accept over netlink */ + IPSET_MAX_COMMENT_SIZE = 255 +) + +const ( + _ = iota + IPSET_CMD_PROTOCOL /* 1: Return protocol version */ + IPSET_CMD_CREATE /* 2: Create a new (empty) set */ + IPSET_CMD_DESTROY /* 3: Destroy a (empty) set */ + IPSET_CMD_FLUSH /* 4: Remove all elements from a set */ + IPSET_CMD_RENAME /* 5: Rename a set */ + IPSET_CMD_SWAP /* 6: Swap two sets */ + IPSET_CMD_LIST /* 7: List sets */ + IPSET_CMD_SAVE /* 8: Save sets */ + IPSET_CMD_ADD /* 9: Add an element to a set */ + IPSET_CMD_DEL /* 10: Delete an element from a set */ + IPSET_CMD_TEST /* 11: Test an element in a set */ + IPSET_CMD_HEADER /* 12: Get set header data only */ + IPSET_CMD_TYPE /* 13: Get set type */ +) + +/* Attributes at command level */ +const ( + _ = iota + IPSET_ATTR_PROTOCOL /* 1: Protocol version */ + IPSET_ATTR_SETNAME /* 2: Name of the set */ + IPSET_ATTR_TYPENAME /* 3: Typename */ + IPSET_ATTR_REVISION /* 4: Settype revision */ + IPSET_ATTR_FAMILY /* 5: Settype family */ + IPSET_ATTR_FLAGS /* 6: Flags at command level */ + IPSET_ATTR_DATA /* 7: Nested attributes */ + IPSET_ATTR_ADT /* 8: Multiple data containers */ + IPSET_ATTR_LINENO /* 9: Restore lineno */ + IPSET_ATTR_PROTOCOL_MIN /* 10: Minimal supported version number */ + + IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME /* Setname at rename/swap */ + IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN /* type rev min */ +) + +/* CADT specific attributes */ +const ( + IPSET_ATTR_IP = 1 + IPSET_ATTR_IP_FROM = 1 + IPSET_ATTR_IP_TO = 2 + IPSET_ATTR_CIDR = 3 + IPSET_ATTR_PORT = 4 + IPSET_ATTR_PORT_FROM = 4 + IPSET_ATTR_PORT_TO = 5 + IPSET_ATTR_TIMEOUT = 6 + IPSET_ATTR_PROTO = 7 + IPSET_ATTR_CADT_FLAGS = 8 + IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO /* 9 */ + IPSET_ATTR_MARK = 10 + IPSET_ATTR_MARKMASK = 11 + + /* Reserve empty slots */ + IPSET_ATTR_CADT_MAX = 16 + + /* 
Create-only specific attributes */ + IPSET_ATTR_GC = 3 + iota + IPSET_ATTR_HASHSIZE + IPSET_ATTR_MAXELEM + IPSET_ATTR_NETMASK + IPSET_ATTR_PROBES + IPSET_ATTR_RESIZE + IPSET_ATTR_SIZE + + /* Kernel-only */ + IPSET_ATTR_ELEMENTS + IPSET_ATTR_REFERENCES + IPSET_ATTR_MEMSIZE + + SET_ATTR_CREATE_MAX +) + +/* ADT specific attributes */ +const ( + IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + iota + 1 + IPSET_ATTR_NAME + IPSET_ATTR_NAMEREF + IPSET_ATTR_IP2 + IPSET_ATTR_CIDR2 + IPSET_ATTR_IP2_TO + IPSET_ATTR_IFACE + IPSET_ATTR_BYTES + IPSET_ATTR_PACKETS + IPSET_ATTR_COMMENT + IPSET_ATTR_SKBMARK + IPSET_ATTR_SKBPRIO + IPSET_ATTR_SKBQUEUE +) + +/* Flags at CADT attribute level, upper half of cmdattrs */ +const ( + IPSET_FLAG_BIT_BEFORE = 0 + IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE) + IPSET_FLAG_BIT_PHYSDEV = 1 + IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV) + IPSET_FLAG_BIT_NOMATCH = 2 + IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH) + IPSET_FLAG_BIT_WITH_COUNTERS = 3 + IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS) + IPSET_FLAG_BIT_WITH_COMMENT = 4 + IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT) + IPSET_FLAG_BIT_WITH_FORCEADD = 5 + IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD) + IPSET_FLAG_BIT_WITH_SKBINFO = 6 + IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO) + IPSET_FLAG_CADT_MAX = 15 +) + +const ( + IPSET_ERR_PRIVATE = 4096 + iota + IPSET_ERR_PROTOCOL + IPSET_ERR_FIND_TYPE + IPSET_ERR_MAX_SETS + IPSET_ERR_BUSY + IPSET_ERR_EXIST_SETNAME2 + IPSET_ERR_TYPE_MISMATCH + IPSET_ERR_EXIST + IPSET_ERR_INVALID_CIDR + IPSET_ERR_INVALID_NETMASK + IPSET_ERR_INVALID_FAMILY + IPSET_ERR_TIMEOUT + IPSET_ERR_REFERENCED + IPSET_ERR_IPADDR_IPV4 + IPSET_ERR_IPADDR_IPV6 + IPSET_ERR_COUNTER + IPSET_ERR_COMMENT + IPSET_ERR_INVALID_MARKMASK + IPSET_ERR_SKBINFO + + /* Type specific error codes */ + IPSET_ERR_TYPE_SPECIFIC = 4352 +) + +type IPSetError uintptr + +func (e IPSetError) Error() string { + switch int(e) { + case IPSET_ERR_PRIVATE: + return "private" + case IPSET_ERR_PROTOCOL: + return "invalid protocol" + case IPSET_ERR_FIND_TYPE: + return "invalid type" + case IPSET_ERR_MAX_SETS: + return "max sets reached" + case IPSET_ERR_BUSY: + return "busy" + case IPSET_ERR_EXIST_SETNAME2: + return "exist_setname2" + case IPSET_ERR_TYPE_MISMATCH: + return "type mismatch" + case IPSET_ERR_EXIST: + return "exist" + case IPSET_ERR_INVALID_CIDR: + return "invalid cidr" + case IPSET_ERR_INVALID_NETMASK: + return "invalid netmask" + case IPSET_ERR_INVALID_FAMILY: + return "invalid family" + case IPSET_ERR_TIMEOUT: + return "timeout" + case IPSET_ERR_REFERENCED: + return "referenced" + case IPSET_ERR_IPADDR_IPV4: + return "invalid ipv4 address" + case IPSET_ERR_IPADDR_IPV6: + return "invalid ipv6 address" + case IPSET_ERR_COUNTER: + return "invalid counter" + case IPSET_ERR_COMMENT: + return "invalid comment" + case IPSET_ERR_INVALID_MARKMASK: + return "invalid markmask" + case IPSET_ERR_SKBINFO: + return "skbinfo" + default: + return "errno " + strconv.Itoa(int(e)) + } +} + +func GetIpsetFlags(cmd int) int { + switch cmd { + case IPSET_CMD_CREATE: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK | unix.NLM_F_CREATE + case IPSET_CMD_DESTROY, + IPSET_CMD_FLUSH, + IPSET_CMD_RENAME, + IPSET_CMD_SWAP, + IPSET_CMD_TEST: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK + case IPSET_CMD_LIST, + IPSET_CMD_SAVE: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK | unix.NLM_F_ROOT | unix.NLM_F_MATCH | unix.NLM_F_DUMP + case IPSET_CMD_ADD, + IPSET_CMD_DEL: + return 
unix.NLM_F_REQUEST | unix.NLM_F_ACK + case IPSET_CMD_HEADER, + IPSET_CMD_TYPE, + IPSET_CMD_PROTOCOL: + return unix.NLM_F_REQUEST + default: + return 0 + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index 7078edefd..faee2fa03 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -1,6 +1,8 @@ package nl import ( + "bytes" + "encoding/binary" "unsafe" ) @@ -13,7 +15,9 @@ const ( IFLA_INFO_KIND IFLA_INFO_DATA IFLA_INFO_XSTATS - IFLA_INFO_MAX = IFLA_INFO_XSTATS + IFLA_INFO_SLAVE_KIND + IFLA_INFO_SLAVE_DATA + IFLA_INFO_MAX = IFLA_INFO_SLAVE_DATA ) const ( @@ -87,7 +91,8 @@ const ( const ( IFLA_IPVLAN_UNSPEC = iota IFLA_IPVLAN_MODE - IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE + IFLA_IPVLAN_FLAG + IFLA_IPVLAN_MAX = IFLA_IPVLAN_FLAG ) const ( @@ -164,6 +169,8 @@ const ( IFLA_BOND_SLAVE_PERM_HWADDR IFLA_BOND_SLAVE_QUEUE_ID IFLA_BOND_SLAVE_AD_AGGREGATOR_ID + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE ) const ( @@ -238,7 +245,9 @@ const ( IFLA_VF_STATS_TX_BYTES IFLA_VF_STATS_BROADCAST IFLA_VF_STATS_MULTICAST - IFLA_VF_STATS_MAX = IFLA_VF_STATS_MULTICAST + IFLA_VF_STATS_RX_DROPPED + IFLA_VF_STATS_TX_DROPPED + IFLA_VF_STATS_MAX = IFLA_VF_STATS_TX_DROPPED ) const ( @@ -321,6 +330,59 @@ func (msg *VfTxRate) Serialize() []byte { return (*(*[SizeofVfTxRate]byte)(unsafe.Pointer(msg)))[:] } +//struct ifla_vf_stats { +// __u64 rx_packets; +// __u64 tx_packets; +// __u64 rx_bytes; +// __u64 tx_bytes; +// __u64 broadcast; +// __u64 multicast; +//}; + +type VfStats struct { + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + Multicast uint64 + Broadcast uint64 + RxDropped uint64 + TxDropped uint64 +} + +func DeserializeVfStats(b []byte) VfStats { + var vfstat VfStats + stats, err := ParseRouteAttr(b) + if err != nil { + return vfstat + } + var valueVar uint64 + for _, stat := range stats { + if err := binary.Read(bytes.NewBuffer(stat.Value), NativeEndian(), &valueVar); err != nil { + break + } + switch stat.Attr.Type { + case IFLA_VF_STATS_RX_PACKETS: + vfstat.RxPackets = valueVar + case IFLA_VF_STATS_TX_PACKETS: + vfstat.TxPackets = valueVar + case IFLA_VF_STATS_RX_BYTES: + vfstat.RxBytes = valueVar + case IFLA_VF_STATS_TX_BYTES: + vfstat.TxBytes = valueVar + case IFLA_VF_STATS_MULTICAST: + vfstat.Multicast = valueVar + case IFLA_VF_STATS_BROADCAST: + vfstat.Broadcast = valueVar + case IFLA_VF_STATS_RX_DROPPED: + vfstat.RxDropped = valueVar + case IFLA_VF_STATS_TX_DROPPED: + vfstat.TxDropped = valueVar + } + } + return vfstat +} + // struct ifla_vf_rate { // __u32 vf; // __u32 min_tx_rate; /* Min Bandwidth in Mbps */ @@ -473,6 +535,14 @@ const ( IFLA_XDP_MAX = IFLA_XDP_PROG_ID ) +// XDP program attach mode (used as dump value for IFLA_XDP_ATTACHED) +const ( + XDP_ATTACHED_NONE = iota + XDP_ATTACHED_DRV + XDP_ATTACHED_SKB + XDP_ATTACHED_HW +) + const ( IFLA_IPTUN_UNSPEC = iota IFLA_IPTUN_LINK @@ -573,3 +643,33 @@ const ( GTP_ROLE_GGSN = iota GTP_ROLE_SGSN ) + +const ( + IFLA_XFRM_UNSPEC = iota + IFLA_XFRM_LINK + IFLA_XFRM_IF_ID + + IFLA_XFRM_MAX = iota - 1 +) + +const ( + IFLA_TUN_UNSPEC = iota + IFLA_TUN_OWNER + IFLA_TUN_GROUP + IFLA_TUN_TYPE + IFLA_TUN_PI + IFLA_TUN_VNET_HDR + IFLA_TUN_PERSIST + IFLA_TUN_MULTI_QUEUE + IFLA_TUN_NUM_QUEUES + IFLA_TUN_NUM_DISABLED_QUEUES + IFLA_TUN_MAX = IFLA_TUN_NUM_DISABLED_QUEUES +) + +const ( + IFLA_IPOIB_UNSPEC = iota + IFLA_IPOIB_PKEY + IFLA_IPOIB_MODE + 
IFLA_IPOIB_UMCAST + IFLA_IPOIB_MAX = IFLA_IPOIB_UMCAST +) diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 7ef2ed926..cef64b82e 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -21,11 +21,13 @@ const ( FAMILY_ALL = unix.AF_UNSPEC FAMILY_V4 = unix.AF_INET FAMILY_V6 = unix.AF_INET6 - FAMILY_MPLS = AF_MPLS + FAMILY_MPLS = unix.AF_MPLS // Arbitrary set value (greater than default 4k) to allow receiving // from kernel more verbose messages e.g. for statistics, // tc rules or filters, or other more memory requiring data. RECEIVE_BUFFER_SIZE = 65536 + // Kernel netlink pid + PidKernel uint32 = 0 ) // SupportedNlFamilies contains the list of netlink families this netlink package supports @@ -46,7 +48,7 @@ func GetIPFamily(ip net.IP) int { var nativeEndian binary.ByteOrder -// Get native endianness for the system +// NativeEndian gets native endianness for the system func NativeEndian() binary.ByteOrder { if nativeEndian == nil { var x uint32 = 0x01020304 @@ -257,6 +259,29 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { return msg } +type Uint32Attribute struct { + Type uint16 + Value uint32 +} + +func (a *Uint32Attribute) Serialize() []byte { + native := NativeEndian() + buf := make([]byte, rtaAlignOf(8)) + native.PutUint16(buf[0:2], 8) + native.PutUint16(buf[2:4], a.Type) + + if a.Type&NLA_F_NET_BYTEORDER != 0 { + binary.BigEndian.PutUint32(buf[4:], a.Value) + } else { + native.PutUint32(buf[4:], a.Value) + } + return buf +} + +func (a *Uint32Attribute) Len() int { + return 8 +} + // Extend RtAttr to handle data and children type RtAttr struct { unix.RtAttr @@ -420,10 +445,13 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro done: for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { return nil, err } + if from.Pid != PidKernel { + return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) + } for _, m := range msgs { if m.Header.Seq != req.Seq { if sharedSocket { @@ -432,12 +460,9 @@ done: return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) } if m.Header.Pid != pid { - return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) - } - if m.Header.Type == unix.NLMSG_DONE { - break done + continue } - if m.Header.Type == unix.NLMSG_ERROR { + if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { native := NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { @@ -617,21 +642,31 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error { return nil } -func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { fd := int(atomic.LoadInt32(&s.fd)) if fd < 0 { - return nil, fmt.Errorf("Receive called on a closed socket") + return nil, nil, fmt.Errorf("Receive called on a closed socket") } - rb := make([]byte, RECEIVE_BUFFER_SIZE) - nr, _, err := unix.Recvfrom(fd, rb, 0) + var fromAddr *unix.SockaddrNetlink + var rb [RECEIVE_BUFFER_SIZE]byte + nr, from, err := unix.Recvfrom(fd, rb[:], 0) if err != nil { - return nil, err + return nil, nil, err + } + fromAddr, ok := from.(*unix.SockaddrNetlink) + if !ok { + return nil, nil, fmt.Errorf("Error converting to netlink sockaddr") } if nr < unix.NLMSG_HDRLEN { - return nil, fmt.Errorf("Got short response from netlink") + 
return nil, nil, fmt.Errorf("Got short response from netlink") + } + rb2 := make([]byte, nr) + copy(rb2, rb[:nr]) + nl, err := syscall.ParseNetlinkMessage(rb2) + if err != nil { + return nil, nil, err } - rb = rb[:nr] - return syscall.ParseNetlinkMessage(rb) + return nl, fromAddr, nil } // SetSendTimeout allows to set a send timeout on the socket diff --git a/vendor/github.com/vishvananda/netlink/nl/parse_attr.go b/vendor/github.com/vishvananda/netlink/nl/parse_attr.go new file mode 100644 index 000000000..19eb8f28e --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/parse_attr.go @@ -0,0 +1,67 @@ +package nl + +import ( + "encoding/binary" + "fmt" +) + +type Attribute struct { + Type uint16 + Value []byte +} + +func ParseAttributes(data []byte) <-chan Attribute { + native := NativeEndian() + result := make(chan Attribute) + + go func() { + i := 0 + for i+4 < len(data) { + length := int(native.Uint16(data[i : i+2])) + + result <- Attribute{ + Type: native.Uint16(data[i+2 : i+4]), + Value: data[i+4 : i+length], + } + i += rtaAlignOf(length) + } + close(result) + }() + + return result +} + +func PrintAttributes(data []byte) { + printAttributes(data, 0) +} + +func printAttributes(data []byte, level int) { + for attr := range ParseAttributes(data) { + for i := 0; i < level; i++ { + print("> ") + } + nested := attr.Type&NLA_F_NESTED != 0 + fmt.Printf("type=%d nested=%v len=%v %v\n", attr.Type&NLA_TYPE_MASK, nested, len(attr.Value), attr.Value) + if nested { + printAttributes(attr.Value, level+1) + } + } +} + +// Uint32 returns the uint32 value respecting the NET_BYTEORDER flag +func (attr *Attribute) Uint32() uint32 { + if attr.Type&NLA_F_NET_BYTEORDER != 0 { + return binary.BigEndian.Uint32(attr.Value) + } else { + return NativeEndian().Uint32(attr.Value) + } +} + +// Uint64 returns the uint64 value respecting the NET_BYTEORDER flag +func (attr *Attribute) Uint64() uint64 { + if attr.Type&NLA_F_NET_BYTEORDER != 0 { + return binary.BigEndian.Uint64(attr.Value) + } else { + return NativeEndian().Uint64(attr.Value) + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go index 47d736c28..1224b747d 100644 --- a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go @@ -9,8 +9,10 @@ const ( ) const ( - RDMA_NLDEV_CMD_GET = 1 - RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_GET = 1 + RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_SYS_GET = 6 + RDMA_NLDEV_CMD_SYS_SET = 7 ) const ( @@ -28,4 +30,6 @@ const ( RDMA_NLDEV_ATTR_PORT_STATE = 12 RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 + RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 + RDMA_NLDEV_NET_NS_FD = 68 ) diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go index 0ff1bba4f..4a01e6e59 100644 --- a/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -21,6 +21,13 @@ const ( FRA_TABLE /* Extended table id */ FRA_FWMASK /* mask for netfilter mark */ FRA_OIFNAME + FRA_PAD + FRA_L3MDEV /* iif or oif is l3mdev goto its table */ + FRA_UID_RANGE /* UID range */ + FRA_PROTOCOL /* Originator of the rule */ + FRA_IP_PROTO /* ip proto */ + FRA_SPORT_RANGE /* sport */ + FRA_DPORT_RANGE /* dport */ ) // ip rule netlink request types @@ -42,16 +49,6 @@ const ( TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/ ) -const ( - AF_MPLS = 28 -) - -const ( - 
RTA_NEWDST = 0x13 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 -) - // RTA_ENCAP subtype const ( MPLS_IPTUNNEL_UNSPEC = iota diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index 6c20b67a1..c24d53eb7 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -89,8 +89,14 @@ const ( SizeofTcU32Key = 0x10 SizeofTcU32Sel = 0x10 // without keys SizeofTcGen = 0x14 + SizeofTcConnmark = SizeofTcGen + 0x04 SizeofTcMirred = SizeofTcGen + 0x08 + SizeofTcTunnelKey = SizeofTcGen + 0x04 + SizeofTcSkbEdit = SizeofTcGen SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 + SizeofTcSfqQopt = 0x0b + SizeofTcSfqRedStats = 0x18 + SizeofTcSfqQoptV1 = SizeofTcSfqQopt + SizeofTcSfqRedStats + 0x1c ) // struct tcmsg { @@ -647,11 +653,47 @@ const ( TCA_BPF_FD TCA_BPF_NAME TCA_BPF_FLAGS - TCA_BPF_MAX = TCA_BPF_FLAGS + TCA_BPF_FLAGS_GEN + TCA_BPF_TAG + TCA_BPF_ID + TCA_BPF_MAX = TCA_BPF_ID ) type TcBpf TcGen +const ( + TCA_ACT_CONNMARK = 14 +) + +const ( + TCA_CONNMARK_UNSPEC = iota + TCA_CONNMARK_PARMS + TCA_CONNMARK_TM + TCA_CONNMARK_MAX = TCA_CONNMARK_TM +) + +// struct tc_connmark { +// tc_gen; +// __u16 zone; +// }; + +type TcConnmark struct { + TcGen + Zone uint16 +} + +func (msg *TcConnmark) Len() int { + return SizeofTcConnmark +} + +func DeserializeTcConnmark(b []byte) *TcConnmark { + return (*TcConnmark)(unsafe.Pointer(&b[0:SizeofTcConnmark][0])) +} + +func (x *TcConnmark) Serialize() []byte { + return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:] +} + const ( TCA_ACT_MIRRED = 8 ) @@ -687,6 +729,69 @@ func (x *TcMirred) Serialize() []byte { return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:] } +const ( + TCA_TUNNEL_KEY_UNSPEC = iota + TCA_TUNNEL_KEY_TM + TCA_TUNNEL_KEY_PARMS + TCA_TUNNEL_KEY_ENC_IPV4_SRC + TCA_TUNNEL_KEY_ENC_IPV4_DST + TCA_TUNNEL_KEY_ENC_IPV6_SRC + TCA_TUNNEL_KEY_ENC_IPV6_DST + TCA_TUNNEL_KEY_ENC_KEY_ID + TCA_TUNNEL_KEY_PAD + TCA_TUNNEL_KEY_ENC_DST_PORT + TCA_TUNNEL_KEY_NO_CSUM + TCA_TUNNEL_KEY_ENC_OPTS + TCA_TUNNEL_KEY_ENC_TOS + TCA_TUNNEL_KEY_ENC_TTL + TCA_TUNNEL_KEY_MAX +) + +type TcTunnelKey struct { + TcGen + Action int32 +} + +func (x *TcTunnelKey) Len() int { + return SizeofTcTunnelKey +} + +func DeserializeTunnelKey(b []byte) *TcTunnelKey { + return (*TcTunnelKey)(unsafe.Pointer(&b[0:SizeofTcTunnelKey][0])) +} + +func (x *TcTunnelKey) Serialize() []byte { + return (*(*[SizeofTcTunnelKey]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_SKBEDIT_UNSPEC = iota + TCA_SKBEDIT_TM + TCA_SKBEDIT_PARMS + TCA_SKBEDIT_PRIORITY + TCA_SKBEDIT_QUEUE_MAPPING + TCA_SKBEDIT_MARK + TCA_SKBEDIT_PAD + TCA_SKBEDIT_PTYPE + TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK +) + +type TcSkbEdit struct { + TcGen +} + +func (x *TcSkbEdit) Len() int { + return SizeofTcSkbEdit +} + +func DeserializeSkbEdit(b []byte) *TcSkbEdit { + return (*TcSkbEdit)(unsafe.Pointer(&b[0:SizeofTcSkbEdit][0])) +} + +func (x *TcSkbEdit) Serialize() []byte { + return (*(*[SizeofTcSkbEdit]byte)(unsafe.Pointer(x)))[:] +} + // struct tc_police { // __u32 index; // int action; @@ -776,3 +881,103 @@ const ( TCA_HFSC_FSC TCA_HFSC_USC ) + +// struct tc_sfq_qopt { +// unsigned quantum; /* Bytes per round allocated to flow */ +// int perturb_period; /* Period of hash perturbation */ +// __u32 limit; /* Maximal packets in queue */ +// unsigned divisor; /* Hash divisor */ +// unsigned flows; /* Maximal number of flows */ +// }; + +type TcSfqQopt struct { + Quantum uint8 + Perturb int32 + Limit uint32 + Divisor uint8 + 
Flows uint8 +} + +func (x *TcSfqQopt) Len() int { + return SizeofTcSfqQopt +} + +func DeserializeTcSfqQopt(b []byte) *TcSfqQopt { + return (*TcSfqQopt)(unsafe.Pointer(&b[0:SizeofTcSfqQopt][0])) +} + +func (x *TcSfqQopt) Serialize() []byte { + return (*(*[SizeofTcSfqQopt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_sfqred_stats { +// __u32 prob_drop; /* Early drops, below max threshold */ +// __u32 forced_drop; /* Early drops, after max threshold */ +// __u32 prob_mark; /* Marked packets, below max threshold */ +// __u32 forced_mark; /* Marked packets, after max threshold */ +// __u32 prob_mark_head; /* Marked packets, below max threshold */ +// __u32 forced_mark_head;/* Marked packets, after max threshold */ +// }; +type TcSfqRedStats struct { + ProbDrop uint32 + ForcedDrop uint32 + ProbMark uint32 + ForcedMark uint32 + ProbMarkHead uint32 + ForcedMarkHead uint32 +} + +func (x *TcSfqRedStats) Len() int { + return SizeofTcSfqRedStats +} + +func DeserializeTcSfqRedStats(b []byte) *TcSfqRedStats { + return (*TcSfqRedStats)(unsafe.Pointer(&b[0:SizeofTcSfqRedStats][0])) +} + +func (x *TcSfqRedStats) Serialize() []byte { + return (*(*[SizeofTcSfqRedStats]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_sfq_qopt_v1 { +// struct tc_sfq_qopt v0; +// unsigned int depth; /* max number of packets per flow */ +// unsigned int headdrop; +// /* SFQRED parameters */ +// __u32 limit; /* HARD maximal flow queue length (bytes) */ +// __u32 qth_min; /* Min average length threshold (bytes) */ +// __u32 qth_max; /* Max average length threshold (bytes) */ +// unsigned char Wlog; /* log(W) */ +// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ +// unsigned char Scell_log; /* cell size for idle damping */ +// unsigned char flags; +// __u32 max_P; /* probability, high resolution */ +// /* SFQRED stats */ +// struct tc_sfqred_stats stats; +// }; +type TcSfqQoptV1 struct { + TcSfqQopt + Depth uint32 + HeadDrop uint32 + Limit uint32 + QthMin uint32 + QthMax uint32 + Wlog byte + Plog byte + ScellLog byte + Flags byte + MaxP uint32 + TcSfqRedStats +} + +func (x *TcSfqQoptV1) Len() int { + return SizeofTcSfqQoptV1 +} + +func DeserializeTcSfqQoptV1(b []byte) *TcSfqQoptV1 { + return (*TcSfqQoptV1)(unsafe.Pointer(&b[0:SizeofTcSfqQoptV1][0])) +} + +func (x *TcSfqQoptV1) Serialize() []byte { + return (*(*[SizeofTcSfqQoptV1]byte)(unsafe.Pointer(x)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index 09a2ffa10..dce9073f7 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -50,34 +50,44 @@ const ( // Attribute types const ( /* Netlink message attributes. 
*/ - XFRMA_UNSPEC = 0x00 - XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */ - XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */ - XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */ - XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */ - XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */ - XFRMA_SA = 0x06 /* struct xfrm_usersa_info */ - XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */ - XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */ - XFRMA_LTIME_VAL = 0x09 - XFRMA_REPLAY_VAL = 0x0a - XFRMA_REPLAY_THRESH = 0x0b - XFRMA_ETIMER_THRESH = 0x0c - XFRMA_SRCADDR = 0x0d /* xfrm_address_t */ - XFRMA_COADDR = 0x0e /* xfrm_address_t */ - XFRMA_LASTUSED = 0x0f /* unsigned long */ - XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */ - XFRMA_MIGRATE = 0x11 - XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */ - XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */ - XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */ - XFRMA_MARK = 0x15 /* struct xfrm_mark */ - XFRMA_TFCPAD = 0x16 /* __u32 */ - XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */ - XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */ - XFRMA_MAX = 0x18 + XFRMA_UNSPEC = iota + XFRMA_ALG_AUTH /* struct xfrm_algo */ + XFRMA_ALG_CRYPT /* struct xfrm_algo */ + XFRMA_ALG_COMP /* struct xfrm_algo */ + XFRMA_ENCAP /* struct xfrm_algo + struct xfrm_encap_tmpl */ + XFRMA_TMPL /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA /* struct xfrm_usersa_info */ + XFRMA_POLICY /* struct xfrm_userpolicy_info */ + XFRMA_SEC_CTX /* struct xfrm_sec_ctx */ + XFRMA_LTIME_VAL + XFRMA_REPLAY_VAL + XFRMA_REPLAY_THRESH + XFRMA_ETIMER_THRESH + XFRMA_SRCADDR /* xfrm_address_t */ + XFRMA_COADDR /* xfrm_address_t */ + XFRMA_LASTUSED /* unsigned long */ + XFRMA_POLICY_TYPE /* struct xfrm_userpolicy_type */ + XFRMA_MIGRATE + XFRMA_ALG_AEAD /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS /* struct xfrm_user_kmaddress */ + XFRMA_ALG_AUTH_TRUNC /* struct xfrm_algo_auth */ + XFRMA_MARK /* struct xfrm_mark */ + XFRMA_TFCPAD /* __u32 */ + XFRMA_REPLAY_ESN_VAL /* struct xfrm_replay_esn */ + XFRMA_SA_EXTRA_FLAGS /* __u32 */ + XFRMA_PROTO /* __u8 */ + XFRMA_ADDRESS_FILTER /* struct xfrm_address_filter */ + XFRMA_PAD + XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ + XFRMA_SET_MARK /* __u32 */ + XFRMA_SET_MARK_MASK /* __u32 */ + XFRMA_IF_ID /* __u32 */ + + XFRMA_MAX = iota - 1 ) +const XFRMA_OUTPUT_MARK = XFRMA_SET_MARK + const ( SizeofXfrmAddress = 0x10 SizeofXfrmSelector = 0x38 diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go index b6290fd54..43a947f22 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go @@ -13,7 +13,7 @@ const ( SizeofXfrmAlgoAuth = 0x48 SizeofXfrmAlgoAEAD = 0x48 SizeofXfrmEncapTmpl = 0x18 - SizeofXfrmUsersaFlush = 0x8 + SizeofXfrmUsersaFlush = 0x1 SizeofXfrmReplayStateEsn = 0x18 ) diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go index 3540fb032..8418569ee 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -285,7 +285,7 @@ type Fq struct { func (fq *Fq) String() string { return fmt.Sprintf( - "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitalQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateTreshold: %v}", + "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: 
%v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}", fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, ) } @@ -338,3 +338,27 @@ func (qdisc *FqCodel) Attrs() *QdiscAttrs { func (qdisc *FqCodel) Type() string { return "fq_codel" } + +type Sfq struct { + QdiscAttrs + // TODO: Only the simplified options for SFQ are handled here. Support for the extended one can be added later. + Quantum uint8 + Perturb uint8 + Limit uint32 + Divisor uint8 +} + +func (sfq *Sfq) String() string { + return fmt.Sprintf( + "{%v -- Quantum: %v, Perturb: %v, Limit: %v, Divisor: %v}", + sfq.Attrs(), sfq.Quantum, sfq.Perturb, sfq.Limit, sfq.Divisor, + ) +} + +func (qdisc *Sfq) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Sfq) Type() string { + return "sfq" +} diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index e9eee5908..d0e1ca194 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -278,6 +278,14 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.FlowDefaultRate > 0 { options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) } + case *Sfq: + opt := nl.TcSfqQoptV1{} + opt.TcSfqQopt.Quantum = qdisc.Quantum + opt.TcSfqQopt.Perturb = int32(qdisc.Perturb) + opt.TcSfqQopt.Limit = qdisc.Limit + opt.TcSfqQopt.Divisor = qdisc.Divisor + + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) default: options = nil } @@ -362,6 +370,8 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { qdisc = &FqCodel{} case "netem": qdisc = &Netem{} + case "sfq": + qdisc = &Sfq{} default: qdisc = &GenericQdisc{QdiscType: qdiscType} } @@ -417,6 +427,10 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { if err := parseNetemData(qdisc, attr.Value); err != nil { return nil, err } + case "sfq": + if err := parseSfqData(qdisc, attr.Value); err != nil { + return nil, err + } // no options for ingress } @@ -582,6 +596,17 @@ func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { return nil } +func parseSfqData(qdisc Qdisc, value []byte) error { + sfq := qdisc.(*Sfq) + opt := nl.DeserializeTcSfqQoptV1(value) + sfq.Quantum = opt.TcSfqQopt.Quantum + sfq.Perturb = uint8(opt.TcSfqQopt.Perturb) + sfq.Limit = opt.TcSfqQopt.Limit + sfq.Divisor = opt.TcSfqQopt.Divisor + + return nil +} + const ( TIME_UNITS_PER_SEC = 1000000 ) @@ -598,10 +623,10 @@ func initClock() { return } parts := strings.Split(strings.TrimSpace(string(data)), " ") - if len(parts) < 3 { + if len(parts) < 4 { return } - var vals [3]uint64 + var vals [4]uint64 for i := range vals { val, err := strconv.ParseUint(parts[i], 16, 32) if err != nil { @@ -615,7 +640,12 @@ func initClock() { } clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor - hz = float64(vals[0]) + if vals[2] == 1000000 { + // ref https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/lib/utils.c#n963 + hz = float64(vals[3]) + } else { + hz = 100 + } } func TickInUsec() float64 { @@ -663,6 +693,7 @@ func latency(rate uint64, limit, buffer uint32) float64 { return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer)) } -func Xmittime(rate uint64, size uint32) float64 { - return TickInUsec() * 
TIME_UNITS_PER_SEC * (float64(size) / float64(rate)) +func Xmittime(rate uint64, size uint32) uint32 { + // https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/tc/tc_core.c#n62 + return time2Tick(uint32(TIME_UNITS_PER_SEC * (float64(size) / float64(rate)))) } diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go index e7d34ddb5..ff014ca4c 100644 --- a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go +++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -77,28 +77,39 @@ func executeOneGetRdmaLink(data []byte) (*RdmaLink, error) { return &link, nil } -func execRdmaGetLink(req *nl.NetlinkRequest, name string) (*RdmaLink, error) { +func execRdmaSetLink(req *nl.NetlinkRequest) error { + + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkList gets a list of RDMA link devices. +// Equivalent to: `rdma dev show` +func RdmaLinkList() ([]*RdmaLink, error) { + return pkgHandle.RdmaLinkList() +} + +// RdmaLinkList gets a list of RDMA link devices. +// Equivalent to: `rdma dev show` +func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) msgs, err := req.Execute(unix.NETLINK_RDMA, 0) if err != nil { return nil, err } + + var res []*RdmaLink for _, m := range msgs { link, err := executeOneGetRdmaLink(m) if err != nil { return nil, err } - if link.Attrs.Name == name { - return link, nil - } + res = append(res, link) } - return nil, fmt.Errorf("Rdma device %v not found", name) -} -func execRdmaSetLink(req *nl.NetlinkRequest) error { - - _, err := req.Execute(unix.NETLINK_RDMA, 0) - return err + return res, nil } // RdmaLinkByName finds a link by name and returns a pointer to the object if @@ -110,11 +121,16 @@ func RdmaLinkByName(name string) (*RdmaLink, error) { // RdmaLinkByName finds a link by name and returns a pointer to the object if // found and nil error, otherwise returns error code. func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) { - - proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) - req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) - - return execRdmaGetLink(req, name) + links, err := h.RdmaLinkList() + if err != nil { + return nil, err + } + for _, link := range links { + if link.Attrs.Name == name { + return link, nil + } + } + return nil, fmt.Errorf("Rdma device %v not found", name) } // RdmaLinkSetName sets the name of the rdma link device. 
Return nil on success @@ -143,3 +159,122 @@ func (h *Handle) RdmaLinkSetName(link *RdmaLink, name string) error { return execRdmaSetLink(req) } + +func netnsModeToString(mode uint8) string { + switch mode { + case 0: + return "exclusive" + case 1: + return "shared" + default: + return "unknown" + } +} + +func executeOneGetRdmaNetnsMode(data []byte) (string, error) { + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE: + var mode uint8 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &mode) + return netnsModeToString(mode), nil + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return "", fmt.Errorf("Invalid netns mode") +} + +// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem +// Returns mode string and error status as nil on success or returns error +// otherwise. +// Equivalent to: `rdma system show netns' +func RdmaSystemGetNetnsMode() (string, error) { + return pkgHandle.RdmaSystemGetNetnsMode() +} + +// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem +// Returns mode string and error status as nil on success or returns error +// otherwise. +// Equivalent to: `rdma system show netns' +func (h *Handle) RdmaSystemGetNetnsMode() (string, error) { + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return "", err + } + if len(msgs) == 0 { + return "", fmt.Errorf("No valid response from kernel") + } + return executeOneGetRdmaNetnsMode(msgs[0]) +} + +func netnsModeStringToUint8(mode string) (uint8, error) { + switch mode { + case "exclusive": + return 0, nil + case "shared": + return 1, nil + default: + return 0, fmt.Errorf("Invalid mode; %q", mode) + } +} + +// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem +// Returns nil on success or appropriate error code. +// Equivalent to: `rdma system set netns { shared | exclusive }' +func RdmaSystemSetNetnsMode(NewMode string) error { + return pkgHandle.RdmaSystemSetNetnsMode(NewMode) +} + +// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem +// Returns nil on success or appropriate error code. +// Equivalent to: `rdma system set netns { shared | exclusive }' +func (h *Handle) RdmaSystemSetNetnsMode(NewMode string) error { + value, err := netnsModeStringToUint8(NewMode) + if err != nil { + return err + } + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + data := nl.NewRtAttr(nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE, []byte{value}) + req.AddData(data) + + _, err = req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. +// Similar to: `rdma dev set $dev netns $ns` +func RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { + return pkgHandle.RdmaLinkSetNsFd(link, fd) +} + +// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. 
+// Similar to: `rdma dev set $dev netns $ns` +func (h *Handle) RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, + nl.Uint32Attr(link.Attrs.Index)) + req.AddData(data) + + data = nl.NewRtAttr(nl.RDMA_NLDEV_NET_NS_FD, nl.Uint32Attr(fd)) + req.AddData(data) + + return execRdmaSetLink(req) +} diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 58ff1af60..b16254174 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -29,25 +29,38 @@ type Encap interface { // Route represents a netlink route. type Route struct { - LinkIndex int - ILinkIndex int - Scope Scope - Dst *net.IPNet - Src net.IP - Gw net.IP - MultiPath []*NexthopInfo - Protocol int - Priority int - Table int - Type int - Tos int - Flags int - MPLSDst *int - NewDst Destination - Encap Encap - MTU int - AdvMSS int - Hoplimit int + LinkIndex int + ILinkIndex int + Scope Scope + Dst *net.IPNet + Src net.IP + Gw net.IP + MultiPath []*NexthopInfo + Protocol int + Priority int + Table int + Type int + Tos int + Flags int + MPLSDst *int + NewDst Destination + Encap Encap + MTU int + Window int + Rtt int + RttVar int + Ssthresh int + Cwnd int + AdvMSS int + Reordering int + Hoplimit int + InitCwnd int + Features int + RtoMin int + InitRwnd int + QuickACK int + Congctl string + FastOpenNoCookie int } func (r Route) String() string { diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index 93fb87aa3..4e778a417 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -33,6 +33,9 @@ const ( RT_FILTER_GW RT_FILTER_TABLE RT_FILTER_HOPLIMIT + RT_FILTER_PRIORITY + RT_FILTER_MARK + RT_FILTER_MASK ) const ( @@ -261,7 +264,7 @@ func (e *SEG6Encap) Equal(x Encap) bool { return true } -// SEG6Local definitions +// SEG6LocalEncap definitions type SEG6LocalEncap struct { Flags [nl.SEG6_LOCAL_MAX]bool Action int @@ -519,18 +522,18 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if err != nil { return err } - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_NEWDST, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_NEWDST, buf)) } if route.Encap != nil { buf := make([]byte, 2) native.PutUint16(buf, uint16(route.Encap.Type())) - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) buf, err := route.Encap.Encode() if err != nil { return err } - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } if route.Src != nil { @@ -594,17 +597,17 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if err != nil { return err } - children = append(children, nl.NewRtAttr(nl.RTA_NEWDST, buf)) + children = append(children, nl.NewRtAttr(unix.RTA_NEWDST, buf)) } if nh.Encap != nil { buf := make([]byte, 2) native.PutUint16(buf, uint16(nh.Encap.Type())) - rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf)) + children = append(children, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) buf, err := nh.Encap.Encode() if err != nil { return err } - children = append(children, nl.NewRtAttr(nl.RTA_ENCAP, buf)) + children = 
append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } rtnh.Children = children buf = append(buf, rtnh.Serialize()...) @@ -639,19 +642,70 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } var metrics []*nl.RtAttr - // TODO: support other rta_metric values if route.MTU > 0 { b := nl.Uint32Attr(uint32(route.MTU)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) } + if route.Window > 0 { + b := nl.Uint32Attr(uint32(route.Window)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_WINDOW, b)) + } + if route.Rtt > 0 { + b := nl.Uint32Attr(uint32(route.Rtt)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTT, b)) + } + if route.RttVar > 0 { + b := nl.Uint32Attr(uint32(route.RttVar)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTTVAR, b)) + } + if route.Ssthresh > 0 { + b := nl.Uint32Attr(uint32(route.Ssthresh)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_SSTHRESH, b)) + } + if route.Cwnd > 0 { + b := nl.Uint32Attr(uint32(route.Cwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_CWND, b)) + } if route.AdvMSS > 0 { b := nl.Uint32Attr(uint32(route.AdvMSS)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) } + if route.Reordering > 0 { + b := nl.Uint32Attr(uint32(route.Reordering)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_REORDERING, b)) + } if route.Hoplimit > 0 { b := nl.Uint32Attr(uint32(route.Hoplimit)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_HOPLIMIT, b)) } + if route.InitCwnd > 0 { + b := nl.Uint32Attr(uint32(route.InitCwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_INITCWND, b)) + } + if route.Features > 0 { + b := nl.Uint32Attr(uint32(route.Features)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_FEATURES, b)) + } + if route.RtoMin > 0 { + b := nl.Uint32Attr(uint32(route.RtoMin)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTO_MIN, b)) + } + if route.InitRwnd > 0 { + b := nl.Uint32Attr(uint32(route.InitRwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_INITRWND, b)) + } + if route.QuickACK > 0 { + b := nl.Uint32Attr(uint32(route.QuickACK)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_QUICKACK, b)) + } + if route.Congctl != "" { + b := nl.ZeroTerminated(route.Congctl) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_CC_ALGO, b)) + } + if route.FastOpenNoCookie > 0 { + b := nl.Uint32Attr(uint32(route.FastOpenNoCookie)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_FASTOPEN_NO_COOKIE, b)) + } if metrics != nil { attr := nl.NewRtAttr(unix.RTA_METRICS, nil) @@ -839,7 +893,7 @@ func deserializeRoute(m []byte) (Route, error) { switch attr.Attr.Type { case unix.RTA_GATEWAY: info.Gw = net.IP(attr.Value) - case nl.RTA_NEWDST: + case unix.RTA_NEWDST: var d Destination switch msg.Family { case nl.FAMILY_MPLS: @@ -849,9 +903,9 @@ func deserializeRoute(m []byte) (Route, error) { return nil, nil, err } info.NewDst = d - case nl.RTA_ENCAP_TYPE: + case unix.RTA_ENCAP_TYPE: encapType = attr - case nl.RTA_ENCAP: + case unix.RTA_ENCAP: encap = attr } } @@ -880,7 +934,7 @@ func deserializeRoute(m []byte) (Route, error) { route.MultiPath = append(route.MultiPath, info) rest = buf } - case nl.RTA_NEWDST: + case unix.RTA_NEWDST: var d Destination switch msg.Family { case nl.FAMILY_MPLS: @@ -890,9 +944,9 @@ func deserializeRoute(m []byte) (Route, error) { return route, err } route.NewDst = d - case nl.RTA_ENCAP_TYPE: + case unix.RTA_ENCAP_TYPE: encapType = attr - case nl.RTA_ENCAP: + case unix.RTA_ENCAP: encap = attr case unix.RTA_METRICS: metrics, err := 
nl.ParseRouteAttr(attr.Value) @@ -903,10 +957,36 @@ func deserializeRoute(m []byte) (Route, error) { switch metric.Attr.Type { case unix.RTAX_MTU: route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_WINDOW: + route.Window = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTT: + route.Rtt = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTTVAR: + route.RttVar = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_SSTHRESH: + route.Ssthresh = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_CWND: + route.Cwnd = int(native.Uint32(metric.Value[0:4])) case unix.RTAX_ADVMSS: route.AdvMSS = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_REORDERING: + route.Reordering = int(native.Uint32(metric.Value[0:4])) case unix.RTAX_HOPLIMIT: route.Hoplimit = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_INITCWND: + route.InitCwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_FEATURES: + route.Features = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTO_MIN: + route.RtoMin = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_INITRWND: + route.InitRwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_QUICKACK: + route.QuickACK = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_CC_ALGO: + route.Congctl = nl.BytesToString(metric.Value) + case unix.RTAX_FASTOPEN_NO_COOKIE: + route.FastOpenNoCookie = int(native.Uint32(metric.Value[0:4])) } } } @@ -938,15 +1018,27 @@ func deserializeRoute(m []byte) (Route, error) { return route, nil } +// RouteGetOptions contains a set of options to use with +// RouteGetWithOptions +type RouteGetOptions struct { + VrfName string +} + +// RouteGetWithOptions gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get <> vrf '. +func RouteGetWithOptions(destination net.IP, options *RouteGetOptions) ([]Route, error) { + return pkgHandle.RouteGetWithOptions(destination, options) +} + // RouteGet gets a route to a specific destination from the host system. // Equivalent to: 'ip route get'. func RouteGet(destination net.IP) ([]Route, error) { return pkgHandle.RouteGet(destination) } -// RouteGet gets a route to a specific destination from the host system. -// Equivalent to: 'ip route get'. -func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { +// RouteGetWithOptions gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get <> vrf '. +func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOptions) ([]Route, error) { req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) family := nl.GetIPFamily(destination) var destinationData []byte @@ -966,6 +1058,20 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) req.AddData(rtaDst) + if options != nil { + link, err := LinkByName(options.VrfName) + if err != nil { + return nil, err + } + var ( + b = make([]byte, 4) + native = nl.NativeEndian() + ) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { return nil, err @@ -980,7 +1086,12 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { res = append(res, route) } return res, nil +} +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. 
+func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { + return h.RouteGetWithOptions(destination, nil) } // RouteSubscribe takes a chan down which notifications will be sent @@ -1037,13 +1148,19 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { if cberr != nil { cberr(err) } return } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } for _, m := range msgs { if m.Header.Type == unix.NLMSG_DONE { continue diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go index 7fc8ae5df..95f2facfb 100644 --- a/vendor/github.com/vishvananda/netlink/rule.go +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -12,6 +12,7 @@ type Rule struct { Table int Mark int Mask int + Tos uint TunID uint Goto int Src *net.IPNet @@ -22,6 +23,8 @@ type Rule struct { SuppressIfgroup int SuppressPrefixlen int Invert bool + Dport *RulePortRange + Sport *RulePortRange } func (r Rule) String() string { @@ -40,3 +43,14 @@ func NewRule() *Rule { Flow: -1, } } + +// NewRulePortRange creates rule sport/dport range. +func NewRulePortRange(start, end uint16) *RulePortRange { + return &RulePortRange{Start: start, End: end} +} + +// RulePortRange represents rule sport/dport range. +type RulePortRange struct { + Start uint16 + End uint16 +} diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index 6238ae458..40474f30e 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "bytes" "fmt" "net" @@ -55,6 +56,9 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { if rule.Table >= 0 && rule.Table < 256 { msg.Table = uint8(rule.Table) } + if rule.Tos != 0 { + msg.Tos = uint8(rule.Tos) + } var dstFamily uint8 var rtAttrs []*nl.RtAttr @@ -138,18 +142,28 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { } } if rule.IifName != "" { - req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName))) + req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName+"\x00"))) } if rule.OifName != "" { - req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName))) + req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName+"\x00"))) } if rule.Goto >= 0 { - msg.Type = nl.FR_ACT_NOP + msg.Type = nl.FR_ACT_GOTO b := make([]byte, 4) native.PutUint32(b, uint32(rule.Goto)) req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } + if rule.Dport != nil { + b := rule.Dport.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_DPORT_RANGE, b)) + } + + if rule.Sport != nil { + b := rule.Sport.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_SPORT_RANGE, b)) + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -163,6 +177,19 @@ func RuleList(family int) ([]Rule, error) { // RuleList lists rules in the system. // Equivalent to: ip rule list func (h *Handle) RuleList(family int) ([]Rule, error) { + return h.RuleListFiltered(family, nil, 0) +} + +// RuleListFiltered gets a list of rules in the system filtered by the +// specified rule template `filter`. 
+// Equivalent to: ip rule list +func RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { + return pkgHandle.RuleListFiltered(family, filter, filterMask) +} + +// RuleListFiltered lists rules in the system. +// Equivalent to: ip rule list +func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) @@ -184,6 +211,7 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule := NewRule() rule.Invert = msg.Flags&FibRuleInvert > 0 + rule.Tos = uint(msg.Tos) for j := range attrs { switch attrs[j].Attr.Type { @@ -225,10 +253,44 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_PRIORITY: rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_DPORT_RANGE: + rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_SPORT_RANGE: + rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + } + } + + if filter != nil { + switch { + case filterMask&RT_FILTER_SRC != 0 && + (rule.Src == nil || rule.Src.String() != filter.Src.String()): + continue + case filterMask&RT_FILTER_DST != 0 && + (rule.Dst == nil || rule.Dst.String() != filter.Dst.String()): + continue + case filterMask&RT_FILTER_TABLE != 0 && + filter.Table != unix.RT_TABLE_UNSPEC && rule.Table != filter.Table: + continue + case filterMask&RT_FILTER_TOS != 0 && rule.Tos != filter.Tos: + continue + case filterMask&RT_FILTER_PRIORITY != 0 && rule.Priority != filter.Priority: + continue + case filterMask&RT_FILTER_MARK != 0 && rule.Mark != filter.Mark: + continue + case filterMask&RT_FILTER_MASK != 0 && rule.Mask != filter.Mask: + continue } } + res = append(res, *rule) } return res, nil } + +func (pr *RulePortRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 2), make([]byte, 2)} + native.PutUint16(b[0], pr.Start) + native.PutUint16(b[1], pr.End) + return bytes.Join(b, []byte{}) +} diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index 99e9fb4d8..e4e7f7ac3 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -49,10 +50,15 @@ func (r *socketRequest) Serialize() []byte { native.PutUint32(b.Next(4), r.States) networkOrder.PutUint16(b.Next(2), r.ID.SourcePort) networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort) - copy(b.Next(4), r.ID.Source.To4()) - b.Next(12) - copy(b.Next(4), r.ID.Destination.To4()) - b.Next(12) + if r.Family == unix.AF_INET6 { + copy(b.Next(16), r.ID.Source) + copy(b.Next(16), r.ID.Destination) + } else { + copy(b.Next(4), r.ID.Source.To4()) + b.Next(12) + copy(b.Next(4), r.ID.Destination.To4()) + b.Next(12) + } native.PutUint32(b.Next(4), r.ID.Interface) native.PutUint32(b.Next(4), r.ID.Cookie[0]) native.PutUint32(b.Next(4), r.ID.Cookie[1]) @@ -89,10 +95,15 @@ func (s *Socket) deserialize(b []byte) error { s.Retrans = rb.Read() s.ID.SourcePort = networkOrder.Uint16(rb.Next(2)) s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2)) - s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) - rb.Next(12) - s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), 
rb.Read(), rb.Read()) - rb.Next(12) + if s.Family == unix.AF_INET6 { + s.ID.Source = net.IP(rb.Next(16)) + s.ID.Destination = net.IP(rb.Next(16)) + } else { + s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + } s.ID.Interface = native.Uint32(rb.Next(4)) s.ID.Cookie[0] = native.Uint32(rb.Next(4)) s.ID.Cookie[1] = native.Uint32(rb.Next(4)) @@ -141,10 +152,13 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { }, }) s.Send(req) - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { return nil, err } + if from.Pid != nl.PidKernel { + return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + } if len(msgs) == 0 { return nil, errors.New("no message nor error from netlink") } @@ -157,3 +171,68 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { } return sock, nil } + +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type. +func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + if err != nil { + return nil, err + } + defer s.Close() + + req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: INET_DIAG_INFO, + States: uint32(0xfff), // All TCP states + }) + s.Send(req) + + var result []*InetDiagTCPInfoResp +loop: + for { + msgs, from, err := s.Receive() + if err != nil { + return nil, err + } + if from.Pid != nl.PidKernel { + return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + } + if len(msgs) == 0 { + return nil, errors.New("no message nor error from netlink") + } + + for _, m := range msgs { + switch m.Header.Type { + case unix.NLMSG_DONE: + break loop + case unix.NLMSG_ERROR: + native := nl.NativeEndian() + error := int32(native.Uint32(m.Data[0:4])) + return nil, syscall.Errno(-error) + } + sockInfo := &Socket{} + if err := sockInfo.deserialize(m.Data); err != nil { + return nil, err + } + attrs, err := nl.ParseRouteAttr(m.Data[sizeofSocket:]) + if err != nil { + return nil, err + } + var tcpInfo *TCPInfo + for _, a := range attrs { + if a.Attr.Type == INET_DIAG_INFO { + tcpInfo = &TCPInfo{} + if err := tcpInfo.deserialize(a.Value); err != nil { + return nil, err + } + break + } + } + r := &InetDiagTCPInfoResp{InetDiagMsg: sockInfo, TCPInfo: tcpInfo} + result = append(result, r) + } + } + return result, nil +} diff --git a/vendor/github.com/vishvananda/netlink/tcp.go b/vendor/github.com/vishvananda/netlink/tcp.go new file mode 100644 index 000000000..4a42ee5a6 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/tcp.go @@ -0,0 +1,18 @@ +package netlink + +// TCP States +const ( + TCP_ESTABLISHED = iota + 0x01 + TCP_SYN_SENT + TCP_SYN_RECV + TCP_FIN_WAIT1 + TCP_FIN_WAIT2 + TCP_TIME_WAIT + TCP_CLOSE + TCP_CLOSE_WAIT + TCP_LAST_ACK + TCP_LISTEN + TCP_CLOSING + TCP_NEW_SYN_REC + TCP_MAX_STATES +) diff --git a/vendor/github.com/vishvananda/netlink/tcp_linux.go b/vendor/github.com/vishvananda/netlink/tcp_linux.go new file mode 100644 index 000000000..741ea1655 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/tcp_linux.go @@ -0,0 +1,393 @@ +package netlink + +import ( + "bytes" + "io" +) + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Snd_wscale uint8 // no uint4 + Rcv_wscale uint8 + 
Delivery_rate_app_limited uint8 + Fastopen_client_fail uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ + Bytes_received uint64 /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ + Segs_out uint32 /* RFC4898 tcpEStatsPerfSegsOut */ + Segs_in uint32 /* RFC4898 tcpEStatsPerfSegsIn */ + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 /* RFC4898 tcpEStatsDataSegsIn */ + Data_segs_out uint32 /* RFC4898 tcpEStatsDataSegsOut */ + Delivery_rate uint64 + Busy_time uint64 /* Time (usec) busy sending data */ + Rwnd_limited uint64 /* Time (usec) limited by receive window */ + Sndbuf_limited uint64 /* Time (usec) limited by send buffer */ + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ + Bytes_retrans uint64 /* RFC4898 tcpEStatsPerfOctetsRetrans */ + Dsack_dups uint32 /* RFC4898 tcpEStatsStackDSACKDups */ + Reord_seen uint32 /* reordering events seen */ + Rcv_ooopack uint32 /* Out-of-order packets received */ + Snd_wnd uint32 /* peer's advertised receive window after * scaling (bytes) */ +} + +func checkDeserErr(err error) error { + if err == io.EOF { + return nil + } + return err +} + +func (t *TCPInfo) deserialize(b []byte) error { + var err error + rb := bytes.NewBuffer(b) + + t.State, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Ca_state, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Retransmits, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Probes, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Backoff, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Options, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + scales, err := rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Snd_wscale = scales >> 4 // first 4 bits + t.Rcv_wscale = scales & 0xf // last 4 bits + + rateLimAndFastOpen, err := rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Delivery_rate_app_limited = rateLimAndFastOpen >> 7 // get first bit + t.Fastopen_client_fail = rateLimAndFastOpen >> 5 & 3 // get next two bits + + next := rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rto = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Ato = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_mss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_mss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Unacked = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Sacked = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Lost = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Retrans = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Fackets = native.Uint32(next) + + next = rb.Next(4) + if 
len(next) == 0 { + return nil + } + t.Last_data_sent = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_ack_sent = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_data_recv = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_ack_recv = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Pmtu = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_ssthresh = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rtt = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rttvar = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_ssthresh = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_cwnd = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Advmss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Reordering = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_rtt = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_space = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Total_retrans = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Pacing_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Max_pacing_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_acked = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_received = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Segs_out = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Segs_in = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Notsent_bytes = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Min_rtt = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Data_segs_in = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Data_segs_out = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Delivery_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Busy_time = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Rwnd_limited = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Sndbuf_limited = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Delivered = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Delivered_ce = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_sent = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_retrans = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Dsack_dups = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Reord_seen = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_ooopack = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { 
+ return nil + } + t.Snd_wnd = native.Uint32(next) + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go index efe72ddf2..985d3a915 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -54,11 +54,15 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error go func() { defer close(ch) for { - msgs, err := s.Receive() + msgs, from, err := s.Receive() if err != nil { errorChan <- err return } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } for _, m := range msgs { switch m.Header.Type { case nl.XFRM_MSG_EXPIRE: diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go index 3e46f463b..6219d2772 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy.go @@ -85,11 +85,12 @@ type XfrmPolicy struct { Index int Action PolicyAction Ifindex int + Ifid int Mark *XfrmMark Tmpls []XfrmPolicyTmpl } func (p XfrmPolicy) String() string { - return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Mark: %s, Tmpls: %s}", - p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Mark, p.Tmpls) + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", + p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index b35e002dd..a4e132ef5 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -92,6 +92,9 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } @@ -185,6 +188,9 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo req.AddData(out) } + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + resType := nl.XFRM_MSG_NEWPOLICY if nlProto == nl.XFRM_MSG_DELPOLICY { resType = 0 @@ -248,6 +254,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { policy.Mark = new(XfrmMark) policy.Mark.Value = mark.Value policy.Mark.Mask = mark.Mask + case nl.XFRMA_IF_ID: + policy.Ifid = int(native.Uint32(attr.Value)) } } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go index d14740dc5..483d8934a 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -94,6 +94,8 @@ type XfrmState struct { Limits XfrmStateLimits Statistics XfrmStateStats Mark *XfrmMark + OutputMark int + Ifid int Auth *XfrmStateAlgo Crypt *XfrmStateAlgo Aead *XfrmStateAlgo @@ -102,8 +104,8 @@ type XfrmState struct { } func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, 
SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", - sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %d, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) } func (sa XfrmState) Print(stats bool) string { if !stats { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 9e7a8d96c..66c99423c 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -158,6 +158,13 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow)) req.AddData(out) } + if state.OutputMark != 0 { + out := nl.NewRtAttr(nl.XFRMA_OUTPUT_MARK, nl.Uint32Attr(uint32(state.OutputMark))) + req.AddData(out) + } + + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -270,6 +277,9 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(out) } + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { resType = 0 @@ -367,6 +377,10 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { state.Mark = new(XfrmMark) state.Mark.Value = mark.Value state.Mark.Mask = mark.Mask + case nl.XFRMA_OUTPUT_MARK: + state.OutputMark = int(native.Uint32(attr.Value)) + case nl.XFRMA_IF_ID: + state.Ifid = int(native.Uint32(attr.Value)) } } diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md index 66a5f7258..6b45cfb89 100644 --- a/vendor/github.com/vishvananda/netns/README.md +++ b/vendor/github.com/vishvananda/netns/README.md @@ -37,7 +37,6 @@ func main() { // Create a new network namespace newns, _ := netns.New() - netns.Set(newns) defer newns.Close() // Do something with the network namespace diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/netns.go index dd2f21570..116befd54 100644 --- a/vendor/github.com/vishvananda/netns/netns.go +++ b/vendor/github.com/vishvananda/netns/netns.go @@ -10,7 +10,8 @@ package netns import ( "fmt" - "syscall" + + "golang.org/x/sys/unix" ) // NsHandle is a handle to a network namespace. It can be cast directly @@ -24,11 +25,11 @@ func (ns NsHandle) Equal(other NsHandle) bool { if ns == other { return true } - var s1, s2 syscall.Stat_t - if err := syscall.Fstat(int(ns), &s1); err != nil { + var s1, s2 unix.Stat_t + if err := unix.Fstat(int(ns), &s1); err != nil { return false } - if err := syscall.Fstat(int(other), &s2); err != nil { + if err := unix.Fstat(int(other), &s2); err != nil { return false } return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino) @@ -36,11 +37,11 @@ func (ns NsHandle) Equal(other NsHandle) bool { // String shows the file descriptor number and its dev and inode. 
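Aside on the xfrm hunks above: the patch adds `Ifid` to `XfrmPolicy` and `Ifid`/`OutputMark` to `XfrmState`, serialized as `XFRMA_IF_ID` and `XFRMA_OUTPUT_MARK` netlink attributes. A minimal caller-side sketch of the new fields follows; it assumes the package's exported `XfrmPolicyAdd`/`XfrmStateAdd` helpers and uses placeholder addresses, SPI, and mark values, not anything taken from this patch.

```go
package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	_, src, _ := net.ParseCIDR("10.0.0.0/24")
	_, dst, _ := net.ParseCIDR("10.0.1.0/24")

	// Outbound policy bound to an XFRM interface via the new Ifid field.
	pol := &netlink.XfrmPolicy{
		Src:  src,
		Dst:  dst,
		Dir:  netlink.XFRM_DIR_OUT,
		Ifid: 7,
	}
	if err := netlink.XfrmPolicyAdd(pol); err != nil {
		log.Fatal(err)
	}

	// Matching state; a real SA would also carry Auth/Crypt (or Aead) algorithms.
	state := &netlink.XfrmState{
		Src:        net.ParseIP("192.0.2.1"),
		Dst:        net.ParseIP("192.0.2.2"),
		Proto:      netlink.XFRM_PROTO_ESP,
		Mode:       netlink.XFRM_MODE_TUNNEL,
		Spi:        0x100,
		Ifid:       7,   // sent as XFRMA_IF_ID
		OutputMark: 100, // sent as XFRMA_OUTPUT_MARK
	}
	if err := netlink.XfrmStateAdd(state); err != nil {
		log.Fatal(err)
	}
}
```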
func (ns NsHandle) String() string { - var s syscall.Stat_t if ns == -1 { return "NS(None)" } - if err := syscall.Fstat(int(ns), &s); err != nil { + var s unix.Stat_t + if err := unix.Fstat(int(ns), &s); err != nil { return fmt.Sprintf("NS(%d: unknown)", ns) } return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino) @@ -49,11 +50,11 @@ func (ns NsHandle) String() string { // UniqueId returns a string which uniquely identifies the namespace // associated with the network handle. func (ns NsHandle) UniqueId() string { - var s syscall.Stat_t if ns == -1 { return "NS(none)" } - if err := syscall.Fstat(int(ns), &s); err != nil { + var s unix.Stat_t + if err := unix.Fstat(int(ns), &s); err != nil { return "NS(unknown)" } return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino) @@ -67,7 +68,7 @@ func (ns NsHandle) IsOpen() bool { // Close closes the NsHandle and resets its file descriptor to -1. // It is not safe to use an NsHandle after Close() is called. func (ns *NsHandle) Close() error { - if err := syscall.Close(int(*ns)); err != nil { + if err := unix.Close(int(*ns)); err != nil { return err } (*ns) = -1 diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go index e665ef449..c76acd087 100644 --- a/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -6,44 +6,30 @@ import ( "fmt" "io/ioutil" "os" + "path" "path/filepath" - "runtime" "strconv" "strings" "syscall" -) -// SYS_SETNS syscall allows changing the namespace of the current process. -var SYS_SETNS = map[string]uintptr{ - "386": 346, - "amd64": 308, - "arm64": 268, - "arm": 375, - "mips": 4344, - "mipsle": 4344, - "ppc64": 350, - "ppc64le": 350, - "s390x": 339, -}[runtime.GOARCH] + "golang.org/x/sys/unix" +) // Deprecated: use syscall pkg instead (go >= 1.5 needed). const ( - CLONE_NEWUTS = 0x04000000 /* New utsname group? */ - CLONE_NEWIPC = 0x08000000 /* New ipcs */ - CLONE_NEWUSER = 0x10000000 /* New user namespace */ - CLONE_NEWPID = 0x20000000 /* New pid namespace */ - CLONE_NEWNET = 0x40000000 /* New network namespace */ - CLONE_IO = 0x80000000 /* Get io context */ + CLONE_NEWUTS = 0x04000000 /* New utsname group? */ + CLONE_NEWIPC = 0x08000000 /* New ipcs */ + CLONE_NEWUSER = 0x10000000 /* New user namespace */ + CLONE_NEWPID = 0x20000000 /* New pid namespace */ + CLONE_NEWNET = 0x40000000 /* New network namespace */ + CLONE_IO = 0x80000000 /* Get io context */ + bindMountPath = "/run/netns" /* Bind mount path for named netns */ ) // Setns sets namespace using syscall. Note that this should be a method // in syscall but it has not been added. func Setns(ns NsHandle, nstype int) (err error) { - _, _, e1 := syscall.Syscall(SYS_SETNS, uintptr(ns), uintptr(nstype), 0) - if e1 != 0 { - err = e1 - } - return + return unix.Setns(int(ns), nstype) } // Set sets the current network namespace to the namespace represented @@ -52,23 +38,67 @@ func Set(ns NsHandle) (err error) { return Setns(ns, CLONE_NEWNET) } -// New creates a new network namespace and returns a handle to it. +// New creates a new network namespace, sets it as current and returns +// a handle to it. 
func New() (ns NsHandle, err error) { - if err := syscall.Unshare(CLONE_NEWNET); err != nil { + if err := unix.Unshare(CLONE_NEWNET); err != nil { return -1, err } return Get() } +// NewNamed creates a new named network namespace and returns a handle to it +func NewNamed(name string) (NsHandle, error) { + if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { + err = os.MkdirAll(bindMountPath, 0755) + if err != nil { + return None(), err + } + } + + newNs, err := New() + if err != nil { + return None(), err + } + + namedPath := path.Join(bindMountPath, name) + + f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) + if err != nil { + return None(), err + } + f.Close() + + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()) + err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "") + if err != nil { + return None(), err + } + + return newNs, nil +} + +// DeleteNamed deletes a named network namespace +func DeleteNamed(name string) error { + namedPath := path.Join(bindMountPath, name) + + err := syscall.Unmount(namedPath, syscall.MNT_DETACH) + if err != nil { + return err + } + + return os.Remove(namedPath) +} + // Get gets a handle to the current threads network namespace. func Get() (NsHandle, error) { - return GetFromThread(os.Getpid(), syscall.Gettid()) + return GetFromThread(os.Getpid(), unix.Gettid()) } // GetFromPath gets a handle to a network namespace // identified by the path func GetFromPath(path string) (NsHandle, error) { - fd, err := syscall.Open(path, syscall.O_RDONLY, 0) + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { return -1, err } diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/go.etcd.io/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/go.etcd.io/etcd/NOTICE similarity index 100% rename from vendor/github.com/coreos/etcd/NOTICE rename to vendor/go.etcd.io/etcd/NOTICE diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/go.etcd.io/etcd/client/README.md similarity index 87% rename from vendor/github.com/coreos/etcd/client/README.md rename to vendor/go.etcd.io/etcd/client/README.md index c0ec81901..521d6c012 100644 --- a/vendor/github.com/coreos/etcd/client/README.md +++ b/vendor/go.etcd.io/etcd/client/README.md @@ -2,19 +2,14 @@ etcd/client is the Go client library for etcd. -[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) +[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client) -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. 
`client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). ## Install ```bash -go get github.com/coreos/etcd/client +go get go.etcd.io/etcd/client ``` ## Usage @@ -25,9 +20,9 @@ package main import ( "log" "time" + "context" - "golang.org/x/net/context" - "github.com/coreos/etcd/client" + "go.etcd.io/etcd/client" ) func main() { diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/go.etcd.io/etcd/client/auth_role.go similarity index 99% rename from vendor/github.com/coreos/etcd/client/auth_role.go rename to vendor/go.etcd.io/etcd/client/auth_role.go index d15e00dd7..b6ba7e150 100644 --- a/vendor/github.com/coreos/etcd/client/auth_role.go +++ b/vendor/go.etcd.io/etcd/client/auth_role.go @@ -16,11 +16,10 @@ package client import ( "bytes" + "context" "encoding/json" "net/http" "net/url" - - "golang.org/x/net/context" ) type Role struct { diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/go.etcd.io/etcd/client/auth_user.go similarity index 99% rename from vendor/github.com/coreos/etcd/client/auth_user.go rename to vendor/go.etcd.io/etcd/client/auth_user.go index 97c3f3181..8e7e2efe8 100644 --- a/vendor/github.com/coreos/etcd/client/auth_user.go +++ b/vendor/go.etcd.io/etcd/client/auth_user.go @@ -16,12 +16,11 @@ package client import ( "bytes" + "context" "encoding/json" "net/http" "net/url" "path" - - "golang.org/x/net/context" ) var ( diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/go.etcd.io/etcd/client/cancelreq.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/cancelreq.go rename to vendor/go.etcd.io/etcd/client/cancelreq.go diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/go.etcd.io/etcd/client/client.go similarity index 93% rename from vendor/github.com/coreos/etcd/client/client.go rename to vendor/go.etcd.io/etcd/client/client.go index f9131b472..de9ab798e 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/go.etcd.io/etcd/client/client.go @@ -15,6 +15,8 @@ package client import ( + "context" + "encoding/json" "errors" "fmt" "io/ioutil" @@ -27,7 +29,7 @@ import ( "sync" "time" - "golang.org/x/net/context" + "go.etcd.io/etcd/version" ) var ( @@ -201,6 +203,9 @@ type Client interface { // returned SetEndpoints(eps []string) error + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + httpClient } @@ -366,12 +371,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } - if isOneShot { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { + } else if resp.StatusCode/100 == 5 { switch resp.StatusCode { case http.StatusInternalServerError, http.StatusServiceUnavailable: // TODO: make sure this is a no leader response @@ -379,10 +379,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo default: cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s 
returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } - if isOneShot { - return nil, nil, cerr.Errors[0] + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue } - continue + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err } if k != pinned { c.Lock() @@ -477,6 +483,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration } } +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + type roundTripResponse struct { resp *http.Response err error @@ -607,11 +640,11 @@ func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (* if resp.StatusCode/100 == 3 { hdr := resp.Header.Get("Location") if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") + return nil, nil, fmt.Errorf("location header not set") } loc, err := url.Parse(hdr) if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr) } next = &redirectedHTTPAction{ action: act, @@ -637,8 +670,15 @@ func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { } func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) for i, k := range p { neps[i] = eps[k] } diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/go.etcd.io/etcd/client/cluster_error.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/cluster_error.go rename to vendor/go.etcd.io/etcd/client/cluster_error.go diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/go.etcd.io/etcd/client/curl.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/curl.go rename to vendor/go.etcd.io/etcd/client/curl.go diff --git a/vendor/go.etcd.io/etcd/client/discover.go b/vendor/go.etcd.io/etcd/client/discover.go new file mode 100644 index 000000000..580c25626 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "go.etcd.io/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. 
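Aside on the client.go hunk above: it adds `GetVersion` to the etcd v2 `Client` interface, backed by a `GET /version` call. A rough usage sketch, assuming `version.Versions` exposes `Server` and `Cluster` fields and using a placeholder endpoint and timeouts:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// GetVersion queries /version and decodes the response into version.Versions.
	v, err := c.GetVersion(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("server:", v.Server, "cluster:", v.Cluster)
}
```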
+type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string, serviceName string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. +func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain, serviceName) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/go.etcd.io/etcd/client/doc.go similarity index 94% rename from vendor/github.com/coreos/etcd/client/doc.go rename to vendor/go.etcd.io/etcd/client/doc.go index 32fdfb52c..abe5199c3 100644 --- a/vendor/github.com/coreos/etcd/client/doc.go +++ b/vendor/go.etcd.io/etcd/client/doc.go @@ -19,9 +19,9 @@ Create a Config and exchange it for a Client: import ( "net/http" + "context" - "github.com/coreos/etcd/client" - "golang.org/x/net/context" + "go.etcd.io/etcd/client" ) cfg := client.Config{ @@ -59,7 +59,7 @@ Use a custom context to set timeouts on your operations: ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - // set a new key, ignoring it's previous state + // set a new key, ignoring its previous state _, err := kAPI.Set(ctx, "/ping", "pong", nil) if err != nil { if err == context.DeadlineExceeded { diff --git a/vendor/go.etcd.io/etcd/client/json.go b/vendor/go.etcd.io/etcd/client/json.go new file mode 100644 index 000000000..97cdbcd7c --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/json.go @@ -0,0 +1,72 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "github.com/json-iterator/go" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type customNumberExtension struct { + jsoniter.DummyExtension +} + +func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { + if typ.String() == "interface {}" { + return customNumberDecoder{} + } + return nil +} + +type customNumberDecoder struct { +} + +func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + var number jsoniter.Number + iter.ReadVal(&number) + i64, err := strconv.ParseInt(string(number), 10, 64) + if err == nil { + *(*interface{})(ptr) = i64 + return + } + f64, err := strconv.ParseFloat(string(number), 64) + if err == nil { + *(*interface{})(ptr) = f64 + return + } + iter.ReportError("DecodeNumber", err.Error()) + default: + *(*interface{})(ptr) = iter.Read() + } +} + +// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive when unmarshalling, and otherwise compatible with +// the encoding/json standard library. 
+func caseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/go.etcd.io/etcd/client/keys.go similarity index 98% rename from vendor/github.com/coreos/etcd/client/keys.go rename to vendor/go.etcd.io/etcd/client/keys.go index 4a6c41a78..ec53830c7 100644 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ b/vendor/go.etcd.io/etcd/client/keys.go @@ -14,21 +14,17 @@ package client -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - import ( + "context" "encoding/json" "errors" "fmt" + "go.etcd.io/etcd/pkg/pathutil" "net/http" "net/url" "strconv" "strings" "time" - - "github.com/coreos/etcd/pkg/pathutil" - "github.com/ugorji/go/codec" - "golang.org/x/net/context" ) const ( @@ -66,7 +62,7 @@ func (e Error) Error() string { } var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint") ErrEmptyBody = errors.New("client: response body is empty") ) @@ -653,13 +649,14 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp default: err = unmarshalFailedKeysResponse(body) } - - return + return res, err } +var jsonIterator = caseSensitiveJsonIterator() + func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + err := jsonIterator.Unmarshal(body, &res) if err != nil { return nil, ErrInvalidJSON } diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/go.etcd.io/etcd/client/members.go similarity index 98% rename from vendor/github.com/coreos/etcd/client/members.go rename to vendor/go.etcd.io/etcd/client/members.go index 23adf07ad..657131ab0 100644 --- a/vendor/github.com/coreos/etcd/client/members.go +++ b/vendor/go.etcd.io/etcd/client/members.go @@ -16,15 +16,14 @@ package client import ( "bytes" + "context" "encoding/json" "fmt" "net/http" "net/url" "path" - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/types" ) var ( @@ -44,7 +43,7 @@ type Member struct { PeerURLs []string `json:"peerURLs"` // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves it's client-facing APIs. + // serves its client-facing APIs. 
ClientURLs []string `json:"clientURLs"` } diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/go.etcd.io/etcd/client/util.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/util.go rename to vendor/go.etcd.io/etcd/client/util.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go similarity index 87% rename from vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go index 58a77dfc1..4ce15dc6b 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go @@ -18,5 +18,10 @@ package fileutil import "os" +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0700 +) + // OpenDir opens a directory for syncing. func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go similarity index 92% rename from vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go index c123395c0..a10a90583 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go @@ -21,6 +21,11 @@ import ( "syscall" ) +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0777 +) + // OpenDir opens a directory in windows with write access for syncing. func OpenDir(path string) (*os.File, error) { fd, err := openDir(path) diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go similarity index 69% rename from vendor/github.com/coreos/etcd/client/discover.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/doc.go index bfd7aec93..69dde5a7d 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go @@ -1,4 +1,4 @@ -// Copyright 2015 The etcd Authors +// Copyright 2018 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -package client - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) -} +// Package fileutil implements utility functions related to files and paths. +package fileutil diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go similarity index 62% rename from vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go index aad40b759..f36136182 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go @@ -12,15 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package fileutil implements utility functions related to files and paths. package fileutil import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" - "sort" "github.com/coreos/pkg/capnslog" ) @@ -28,13 +27,9 @@ import ( const ( // PrivateFileMode grants owner to read/write a file. 
PrivateFileMode = 0600 - // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0700 ) -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil") -) +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil") // IsDirWriteable checks if dir is writable by writing and removing a file // to dir. It returns nil if dir is writable. @@ -46,32 +41,25 @@ func IsDirWriteable(dir string) error { return os.Remove(f) } -// ReadDir returns the filenames in the given directory in sorted order. -func ReadDir(dirpath string) ([]string, error) { - dir, err := os.Open(dirpath) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - // TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory // does not exists. TouchDirAll also ensures the given directory is writable. func TouchDirAll(dir string) error { - // If path is already a directory, MkdirAll does nothing - // and returns nil. - err := os.MkdirAll(dir, PrivateDirMode) - if err != nil { - // if mkdirAll("a/text") and "text" is not - // a directory, this will return syscall.ENOTDIR - return err + // If path is already a directory, MkdirAll does nothing and returns nil, so, + // first check if dir exist with an expected permission mode. + if Exist(dir) { + err := CheckDirPermission(dir, PrivateDirMode) + if err != nil { + plog.Warningf("check file permission: %v", err) + } + } else { + err := os.MkdirAll(dir, PrivateDirMode) + if err != nil { + // if mkdirAll("a/text") and "text" is not + // a directory, this will return syscall.ENOTDIR + return err + } } + return IsDirWriteable(dir) } @@ -92,6 +80,7 @@ func CreateDirAll(dir string) error { return err } +// Exist returns true if a file or directory exists. func Exist(name string) bool { _, err := os.Stat(name) return err == nil @@ -101,11 +90,11 @@ func Exist(name string) bool { // shorten the length of the file. func ZeroToEnd(f *os.File) error { // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, os.SEEK_CUR) + off, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - lenf, lerr := f.Seek(0, os.SEEK_END) + lenf, lerr := f.Seek(0, io.SeekEnd) if lerr != nil { return lerr } @@ -116,6 +105,25 @@ func ZeroToEnd(f *os.File) error { if err = Preallocate(f, lenf, true); err != nil { return err } - _, err = f.Seek(off, os.SEEK_SET) + _, err = f.Seek(off, io.SeekStart) return err } + +// CheckDirPermission checks permission on an existing dir. +// Returns error if dir is empty or exist with a different permission than specified. +func CheckDirPermission(dir string, perm os.FileMode) error { + if !Exist(dir) { + return fmt.Errorf("directory %q empty, cannot check permission.", dir) + } + //check the existing permission on the directory + dirInfo, err := os.Stat(dir) + if err != nil { + return err + } + dirMode := dirInfo.Mode().Perm() + if dirMode != perm { + err = fmt.Errorf("directory %q exist, but the permission is %q. 
The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode)) + return err + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go similarity index 91% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go index dec25a1af..b0abc98ee 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go @@ -17,6 +17,8 @@ package fileutil import ( + "fmt" + "io" "os" "syscall" ) @@ -36,7 +38,7 @@ const ( var ( wrlck = syscall.Flock_t{ Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } @@ -61,7 +63,7 @@ func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, err + return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err) } flock := wrlck @@ -82,15 +84,14 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, err + return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err) } flock := wrlck err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) - if err != nil { f.Close() return nil, err } - return &LockedFile{f}, err + return &LockedFile{f}, nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go similarity index 99% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go index 8698f4a8d..b1817230a 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go @@ -121,5 +121,5 @@ func lockFileEx(h syscall.Handle, flags, locklow, lockhigh 
uint32, ol *syscall.O err = syscall.EINVAL } } - return + return err } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go similarity index 82% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go index bb7f02812..c747b7cf8 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go @@ -14,7 +14,10 @@ package fileutil -import "os" +import ( + "io" + "os" +) // Preallocate tries to allocate the space for given // file. This operation is only supported on linux by a @@ -22,6 +25,10 @@ import "os" // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } if extendFile { return preallocExtend(f, sizeInBytes) } @@ -29,15 +36,15 @@ func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { } func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, os.SEEK_CUR) + curOff, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - size, err := f.Seek(sizeInBytes, os.SEEK_END) + size, err := f.Seek(sizeInBytes, io.SeekEnd) if err != nil { return err } - if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { + if _, err = f.Seek(curOff, io.SeekStart); err != nil { return err } if sizeInBytes > size { diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go similarity index 61% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go index 1ed09c560..5a6dccfa7 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go @@ -30,6 +30,8 @@ func preallocExtend(f *os.File, sizeInBytes int64) error { } func preallocFixed(f *os.File, sizeInBytes int64) error { + // allocate all requested space or no space at all + // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag fstore := &syscall.Fstore_t{ Flags: syscall.F_ALLOCATEALL, Posmode: syscall.F_PEOFPOSMODE, @@ -39,5 +41,25 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { if errno == 0 || errno == syscall.ENOTSUP { return nil } + + // wrong argument to fallocate syscall + if errno == syscall.EINVAL { + // filesystem "st_blocks" are allocated in the units of + // "Allocation Block Size" (run "diskutil info /" command) + var stat syscall.Stat_t + syscall.Fstat(int(f.Fd()), &stat) + + // syscall.Statfs_t.Bsize is "optimal transfer block size" + // and contains matching 4096 value when latest OS X kernel + // supports 4,096 KB filesystem block size + var statfs syscall.Statfs_t + syscall.Fstatfs(int(f.Fd()), &statfs) + blockSize := int64(statfs.Bsize) + + if stat.Blocks*blockSize >= sizeInBytes { + // enough blocks are already allocated + return nil + } + } return errno } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go diff --git 
a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go similarity index 59% rename from vendor/github.com/coreos/etcd/pkg/fileutil/purge.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/purge.go index 92fceab01..d116f340b 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go @@ -20,16 +20,28 @@ import ( "sort" "strings" "time" + + "go.uber.org/zap" ) -func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(dirname, suffix, max, interval, stop, nil) +func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) +} + +func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + return doneC, errC } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. -func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { +// if donec is non-nil, the function closes it to notify its exit. +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { errC := make(chan error, 1) go func() { + if donec != nil { + defer close(donec) + } for { fnames, err := ReadDir(dirname) if err != nil { @@ -55,11 +67,19 @@ func purgeFile(dirname string, suffix string, max uint, interval time.Duration, return } if err = l.Close(); err != nil { - plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + if lg != nil { + lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + } else { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + } errC <- err return } - plog.Infof("purged file %s successfully", f) + if lg != nil { + lg.Info("purged", zap.String("path", f)) + } else { + plog.Infof("purged file %s successfully", f) + } newfnames = newfnames[1:] } if purgec != nil { diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go new file mode 100644 index 000000000..2eeaa89bc --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go @@ -0,0 +1,70 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path/filepath" + "sort" +) + +// ReadDirOp represents an read-directory operation. +type ReadDirOp struct { + ext string +} + +// ReadDirOption configures archiver operations. +type ReadDirOption func(*ReadDirOp) + +// WithExt filters file names by their extensions. +// (e.g. WithExt(".wal") to list only WAL files) +func WithExt(ext string) ReadDirOption { + return func(op *ReadDirOp) { op.ext = ext } +} + +func (op *ReadDirOp) applyOpts(opts []ReadDirOption) { + for _, opt := range opts { + opt(op) + } +} + +// ReadDir returns the filenames in the given directory in sorted order. +func ReadDir(d string, opts ...ReadDirOption) ([]string, error) { + op := &ReadDirOp{} + op.applyOpts(opts) + + dir, err := os.Open(d) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + sort.Strings(names) + + if op.ext != "" { + tss := make([]string, 0) + for _, v := range names { + if filepath.Ext(v) == op.ext { + tss = append(tss, v) + } + } + names = tss + } + return names, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/sync.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/sync.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/go.etcd.io/etcd/pkg/pathutil/path.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/pathutil/path.go rename to vendor/go.etcd.io/etcd/pkg/pathutil/path.go diff --git a/vendor/go.etcd.io/etcd/pkg/srv/srv.go b/vendor/go.etcd.io/etcd/pkg/srv/srv.go new file mode 100644 index 000000000..c3560026d --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/srv/srv.go @@ -0,0 +1,142 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "go.etcd.io/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. 
+func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } else { + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + err := updateNodeMap(service, serviceScheme) + if err != nil { + return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string, serviceName string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(GetSRVService(service, serviceName, "https"), "https") + errHTTP := updateURLs(GetSRVService(service, serviceName, "http"), "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} + +// GetSRVService generates a SRV service including an optional suffix. +func GetSRVService(service, serviceName string, scheme string) (SRVService string) { + if scheme == "https" { + service = fmt.Sprintf("%s-ssl", service) + } + + if serviceName != "" { + return fmt.Sprintf("%s-%s", service, serviceName) + } + return service +} diff --git a/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go new file mode 100644 index 000000000..b5916bb54 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go @@ -0,0 +1,51 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
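The srv package vendored above wraps DNS SRV discovery for etcd endpoints; a rough sketch of a client-side lookup (the service and domain names are placeholders, assuming the conventional `_etcd-client._tcp` record layout):

```go
package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/pkg/srv"
)

func main() {
	// Queries _etcd-client-ssl._tcp.example.com and _etcd-client._tcp.example.com
	// SRV records and returns the discovered client endpoints.
	c, err := srv.GetClient("etcd-client", "example.com", "")
	if err != nil {
		log.Fatal(err)
	}
	for _, ep := range c.Endpoints {
		fmt.Println(ep)
	}
	fmt.Println("SRV records found:", len(c.SRVs))
}
```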
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import "crypto/tls" + +// cipher suites implemented by Go +// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go +var cipherSuites = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +// GetCipherSuite returns the corresponding cipher suite, +// and boolean value if it is supported. 
+func GetCipherSuite(s string) (uint16, bool) { + v, ok := cipherSuites[s] + return v, ok +} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go rename to vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go similarity index 99% rename from vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go rename to vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go index 79b1f632e..3a5aef089 100644 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go @@ -41,6 +41,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { if err != nil { return nil, err } + certPool.AddCert(cert) } } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/go.etcd.io/etcd/pkg/transport/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/doc.go rename to vendor/go.etcd.io/etcd/pkg/transport/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go similarity index 99% rename from vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go index 6ccae4ee4..4ff8e7f00 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go +++ b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go @@ -79,7 +79,7 @@ func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { kac.SetKeepAlive(true) kac.SetKeepAlivePeriod(30 * time.Second) c = tls.Server(c, l.config) - return + return c, nil } // NewListener creates a Listener which accepts connections from an inner diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go rename to vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go diff --git a/vendor/go.etcd.io/etcd/pkg/transport/listener.go b/vendor/go.etcd.io/etcd/pkg/transport/listener.go new file mode 100644 index 000000000..7260e4d07 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener.go @@ -0,0 +1,449 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "os" + "path/filepath" + "strings" + "time" + + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/tlsutil" + + "go.uber.org/zap" +) + +// NewListener creates a new listner. 
+func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(scheme, tlsinfo, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + return net.Listen("tcp", addr) +} + +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + if tlsinfo != nil && tlsinfo.SkipClientSANVerify { + return NewTLSListener(l, tlsinfo) + } + return newTLSListener(l, tlsinfo, checkSAN) +} + +type TLSInfo struct { + CertFile string + KeyFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool + SkipClientSANVerify bool + + // ServerName ensures the cert matches the given host in case of discovery / virtual hosting + ServerName string + + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + + // CipherSuites is a list of supported cipher suites. + // If empty, Go auto-populates it by default. + // Note that cipher suites are prioritized in the given order. + CipherSuites []uint16 + + selfCert bool + + // parseFunc exists to simplify testing. Typically, parseFunc + // should be left nil. In that case, tls.X509KeyPair will be used. + parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN is a CN which must be provided by a client. + AllowedCN string + + // AllowedHostname is an IP address or hostname that must match the TLS + // certificate provided by a client. + AllowedHostname string + + // Logger logs TLS errors. + // If nil, all logs are discarded. + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. 
+ EmptyCN bool +} + +func (info TLSInfo) String() string { + return fmt.Sprintf("cert = %s, key = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) +} + +func (info TLSInfo) Empty() bool { + return info.CertFile == "" && info.KeyFile == "" +} + +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { + info.Logger = lg + err = fileutil.TouchDirAll(dirpath) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot create cert directory", + zap.Error(err), + ) + } + return + } + + certPath := filepath.Join(dirpath, "cert.pem") + keyPath := filepath.Join(dirpath, "key.pem") + _, errcert := os.Stat(certPath) + _, errkey := os.Stat(keyPath) + if errcert == nil && errkey == nil { + info.CertFile = certPath + info.KeyFile = keyPath + info.selfCert = true + return + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + } + return + } + + tmpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"etcd"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * (24 * time.Hour)), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), + BasicConstraintsValid: true, + } + + for _, host := range hosts { + h, _, _ := net.SplitHostPort(host) + if ip := net.ParseIP(h); ip != nil { + tmpl.IPAddresses = append(tmpl.IPAddresses, ip) + } else { + tmpl.DNSNames = append(tmpl.DNSNames, h) + } + } + + priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + } + return + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate x509 certificate", + zap.Error(err), + ) + } + return + } + + certOut, err := os.Create(certPath) + if err != nil { + info.Logger.Warn( + "cannot cert file", + zap.String("path", certPath), + zap.Error(err), + ) + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + if info.Logger != nil { + info.Logger.Info("created cert file", zap.String("path", certPath)) + } + + b, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return + } + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + } + return + } + pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + keyOut.Close() + if info.Logger != nil { + info.Logger.Info("created key file", zap.String("path", keyPath)) + } + return SelfCert(lg, dirpath, hosts) +} + +// baseConfig is called on initial TLS handshake start. +// +// Previously, +// 1. Server has non-empty (*tls.Config).Certificates on client hello +// 2. 
Server calls (*tls.Config).GetCertificate iff: +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// +// When (*tls.Config).Certificates is always populated on initial handshake, +// client is expected to provide a valid matching SNI to pass the TLS +// verification, thus trigger server (*tls.Config).GetCertificate to reload +// TLS assets. However, a cert whose SAN field does not include domain names +// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus +// it was never able to trigger TLS reload on initial handshake; first +// ceritifcate object was being used, never being updated. +// +// Now, (*tls.Config).Certificates is created empty on initial TLS client +// handshake, in order to trigger (*tls.Config).GetCertificate and populate +// rest of the certificates on every new TLS connection, even when client +// SNI is empty (e.g. cert only includes IPs). +func (info TLSInfo) baseConfig() (*tls.Config, error) { + if info.KeyFile == "" || info.CertFile == "" { + return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) + } + if info.Logger == nil { + info.Logger = zap.NewNop() + } + + _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if err != nil { + return nil, err + } + + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: info.ServerName, + } + + if len(info.CipherSuites) > 0 { + cfg.CipherSuites = info.CipherSuites + } + + // Client certificates may be verified by either an exact match on the CN, + // or a more general check of the CN and SANs. + var verifyCertificate func(*x509.Certificate) bool + if info.AllowedCN != "" { + if info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + verifyCertificate = func(cert *x509.Certificate) bool { + return info.AllowedCN == cert.Subject.CommonName + } + } + if info.AllowedHostname != "" { + verifyCertificate = func(cert *x509.Certificate) bool { + return cert.VerifyHostname(info.AllowedHostname) == nil + } + } + if verifyCertificate != nil { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, chains := range verifiedChains { + if len(chains) != 0 { + if verifyCertificate(chains[0]) { + return nil + } + } + } + return errors.New("client certificate authentication failed") + } + } + + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + return cfg, nil +} + +// cafiles returns a list of CA file paths. +func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP server. +func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + cfg.ClientAuth = tls.NoClientCert + if info.TrustedCAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + cs := info.cafiles() + if len(cs) > 0 { + cp, err := tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + + return cfg, nil +} + +// ClientConfig generates a tls.Config object for use by an HTTP client. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + cfg.InsecureSkipVerify = info.InsecureSkipVerify + + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if hasNonEmptyCN { + return nil, fmt.Errorf("cert has non empty Common Name (%s)", cn) + } + } + + return cfg, nil +} + +// IsClosedConnError returns true if the error is from closing listener, cmux. 
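The transport.TLSInfo type above centralizes cert, key, and CA configuration and derives both server- and client-side tls.Configs from it. A hedged sketch combining it with tlsutil.GetCipherSuite for name-to-ID mapping (the file paths and cipher name are examples; the files must exist on disk for the calls to succeed):

```go
package main

import (
	"log"

	"go.etcd.io/etcd/pkg/tlsutil"
	"go.etcd.io/etcd/pkg/transport"
	"go.uber.org/zap"
)

func main() {
	// Resolve a configured cipher suite name to its crypto/tls constant.
	suite, ok := tlsutil.GetCipherSuite("TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384")
	if !ok {
		log.Fatal("unsupported cipher suite")
	}

	tlsinfo := transport.TLSInfo{
		CertFile:      "/etc/etcd/server.crt", // example paths
		KeyFile:       "/etc/etcd/server.key",
		TrustedCAFile: "/etc/etcd/ca.crt",
		CipherSuites:  []uint16{suite},
		Logger:        zap.NewExample(),
	}

	// ServerConfig requires and verifies client certificates whenever a
	// trusted CA is set; ClientConfig reuses the same files for outbound TLS.
	serverCfg, err := tlsinfo.ServerConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientCfg, err := tlsinfo.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	_, _ = serverCfg, clientCfg
}
```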
+// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") +} diff --git a/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go new file mode 100644 index 000000000..6f1600945 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,272 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials or CRL revoked +// certificates. +type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc +} + +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. +func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + check: check, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
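transport.NewListener above wires the TLS wrapping into listener creation for the https and unixs schemes, handing connections through the handshake and SAN/CRL checks defined in listener_tls.go. A brief sketch (address and TLSInfo fields are placeholders):

```go
package main

import (
	"log"

	"go.etcd.io/etcd/pkg/transport"
)

func main() {
	tlsinfo := &transport.TLSInfo{
		CertFile: "/etc/etcd/server.crt", // example paths
		KeyFile:  "/etc/etcd/server.key",
	}

	// For "https" (or "unixs") the plain listener is wrapped by the TLS
	// listener, which completes the handshake and runs the optional
	// SAN/CRL checks before the connection is returned from Accept.
	ln, err := transport.NewListener("127.0.0.1:2379", "https", tlsinfo)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```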
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return + } + + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + h, _, herr := net.SplitHostPort(remoteAddr) + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' 
{ + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go similarity index 86% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go index 0f4df5fbe..273e99fe0 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go @@ -15,7 +15,6 @@ package transport import ( - "crypto/tls" "net" "time" ) @@ -23,7 +22,7 @@ import ( // NewTimeoutListener returns a listener that listens on the given address. // If read/write on the accepted connection blocks longer than its time limit, // it will return timeout error. 
-func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { ln, err := newListener(addr, scheme) if err != nil { return nil, err @@ -33,7 +32,7 @@ func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeou rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { + if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/go.etcd.io/etcd/pkg/transport/tls.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/tls.go rename to vendor/go.etcd.io/etcd/pkg/transport/tls.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/go.etcd.io/etcd/pkg/transport/transport.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/transport.go rename to vendor/go.etcd.io/etcd/pkg/transport/transport.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go similarity index 87% rename from vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go index c126b6f7f..123e2036f 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go @@ -22,7 +22,7 @@ import ( type unixListener struct{ net.Listener } func NewUnixListener(addr string) (net.Listener, error) { - if err := os.RemoveAll(addr); err != nil { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { return nil, err } l, err := net.Listen("unix", addr) @@ -33,7 +33,7 @@ func NewUnixListener(addr string) (net.Listener, error) { } func (ul *unixListener) Close() error { - if err := os.RemoveAll(ul.Addr().String()); err != nil { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { return err } return ul.Listener.Close() diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/pkg/types/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/doc.go rename to vendor/go.etcd.io/etcd/pkg/types/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/go.etcd.io/etcd/pkg/types/id.go similarity index 98% rename from vendor/github.com/coreos/etcd/pkg/types/id.go rename to vendor/go.etcd.io/etcd/pkg/types/id.go index 1b042d9ce..ae00388dd 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ b/vendor/go.etcd.io/etcd/pkg/types/id.go @@ -14,9 +14,7 @@ package types -import ( - "strconv" -) +import "strconv" // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/go.etcd.io/etcd/pkg/types/set.go similarity index 88% rename from vendor/github.com/coreos/etcd/pkg/types/set.go rename to vendor/go.etcd.io/etcd/pkg/types/set.go index 73ef431be..e7a3cdc9a 100644 --- 
a/vendor/github.com/coreos/etcd/pkg/types/set.go +++ b/vendor/go.etcd.io/etcd/pkg/types/set.go @@ -61,7 +61,7 @@ func (us *unsafeSet) Remove(value string) { // Contains returns whether the set contains the given value func (us *unsafeSet) Contains(value string) (exists bool) { _, exists = us.d[value] - return + return exists } // ContainsAll returns whether the set contains all given values @@ -94,7 +94,7 @@ func (us *unsafeSet) Values() (values []string) { for val := range us.d { values = append(values, val) } - return + return values } // Copy creates a new Set containing the values of the first @@ -148,6 +148,14 @@ func (ts *tsafeSet) Contains(value string) (exists bool) { func (ts *tsafeSet) Equals(other Set) bool { ts.m.RLock() defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Equals(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + return true + } + } return ts.us.Equals(other) } @@ -173,6 +181,15 @@ func (ts *tsafeSet) Copy() Set { func (ts *tsafeSet) Sub(other Set) Set { ts.m.RLock() defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Sub(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + usResult := NewUnsafeSet() + return &tsafeSet{usResult, sync.RWMutex{}} + } + } usResult := ts.us.Sub(other).(*unsafeSet) return &tsafeSet{usResult, sync.RWMutex{}} } diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/pkg/types/slice.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/slice.go rename to vendor/go.etcd.io/etcd/pkg/types/slice.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/go.etcd.io/etcd/pkg/types/urls.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/urls.go rename to vendor/go.etcd.io/etcd/pkg/types/urls.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/urlsmap.go rename to vendor/go.etcd.io/etcd/pkg/types/urlsmap.go diff --git a/vendor/go.etcd.io/etcd/version/version.go b/vendor/go.etcd.io/etcd/version/version.go new file mode 100644 index 000000000..ee97e461e --- /dev/null +++ b/vendor/go.etcd.io/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. +package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
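The set.go hunks above short-circuit Equals and Sub when a thread-safe set is compared with itself, so the methods do not re-enter the set's own read lock. A small illustration, assuming the package's exported constructor is types.NewThreadsafeSet as in upstream etcd (the constructor itself is not part of this diff):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/types"
)

func main() {
	s := types.NewThreadsafeSet("a", "b")

	// Both calls hold s's read lock; the patched methods detect that the
	// argument is the receiver itself and answer without re-locking.
	fmt.Println(s.Equals(s))       // true
	fmt.Println(s.Sub(s).Length()) // 0
}
```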
+ MinClusterVersion = "3.0.0" + Version = "3.4.13" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod deleted file mode 100644 index c867df5f5..000000000 --- a/vendor/go.opencensus.io/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module go.opencensus.io - -require ( - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 - github.com/golang/protobuf v1.3.1 - github.com/google/go-cmp v0.3.0 - github.com/stretchr/testify v1.4.0 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 - golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect - golang.org/x/text v0.3.2 // indirect - google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect - google.golang.org/grpc v1.20.1 -) - -go 1.13 diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum deleted file mode 100644 index ed2a1d844..000000000 --- a/vendor/go.opencensus.io/go.sum +++ /dev/null @@ -1,73 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd 
h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index 53e71305a..ed3a5db56 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -204,7 +204,10 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists } + return trace.Status{Code: code, Message: codeToStr[code]} } diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..571116cc3 --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. 
+ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..c3fa25389 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,12 @@ +/bin +.DS_Store +/vendor +cover.html +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 000000000..13d0a4f25 --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +env: + global: + - GO111MODULE=on + +matrix: + include: + - go: oldstable + - go: stable + env: LINT=1 + +cache: + directories: + - vendor + +before_install: + - go version + +script: + - test -z "$LINT" || make lint + - make cover + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 000000000..24c0274dc --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,76 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. + +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +## [1.0.0] - 2016-07-18 + +- Initial release. 
+ +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/go.uber.org/atomic/LICENSE.txt similarity index 84% rename from vendor/github.com/ugorji/go/LICENSE rename to vendor/go.uber.org/atomic/LICENSE.txt index 95a0f0541..8765c9fbc 100644 --- a/vendor/github.com/ugorji/go/LICENSE +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -1,7 +1,4 @@ -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. -All rights reserved. +Copyright (c) 2016 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -10,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..1b1376d42 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,78 @@ +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! 
-s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint + +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + +.PHONY: lint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..ade0c20f1 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,63 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. If you are using Go modules, this package will fail to +compile with the legacy import path path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. + +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` + +## Usage + +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). 
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 000000000..9cf1914b1 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(v bool) *Bool { + x := &Bool{} + if v != _zeroBool { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(v bool) { + x.v.Store(boolToInt(v)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(o, n bool) bool { + return x.v.CAS(boolToInt(o), boolToInt(n)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(o bool) bool { + return truthy(x.v.Swap(boolToInt(o))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 000000000..c7bf7a827 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 000000000..ae7390ee6 --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 000000000..027cfcb20 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(v time.Duration) *Duration { + x := &Duration{} + if v != _zeroDuration { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(v time.Duration) { + x.v.Store(int64(v)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CAS(o, n time.Duration) bool { + return x.v.CAS(int64(o), int64(n)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(o time.Duration) time.Duration { + return time.Duration(x.v.Swap(int64(o))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 000000000..6273b66bd --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..a6166fbea --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,51 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(v error) *Error { + x := &Error{} + if v != _zeroError { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(v error) { + x.v.Store(packError(v)) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 000000000..ffe0be21c --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 000000000..071906020 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,76 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(v float64) *Float64 { + x := &Float64{} + if v != _zeroFloat64 { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped float64. 
+func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(v float64) { + x.v.Store(math.Float64bits(v)) +} + +// CAS is an atomic compare-and-swap for float64 values. +func (x *Float64) CAS(o, n float64) bool { + return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 000000000..927b1add7 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "strconv" + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 000000000..50d6b2485 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,26 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 000000000..18ae56493 --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(i int32) *Int32 { + return &Int32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. 
+func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 000000000..2bcbbfaa9 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(i int64) *Int64 { + return &Int64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. 
+func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 000000000..a8201cb4a --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..225b7a2be --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,54 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(v string) *String { + x := &String{} + if v != _zeroString { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) + } + return _zeroString +} + +// Store atomically stores the passed string. +func (x *String) Store(v string) { + x.v.Store(v) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 000000000..3a9558213 --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. 
+// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 000000000..a973aba1a --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 000000000..3b6c71fd5 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 000000000..671f3a382 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 000000000..b9a05e3da --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1,4 @@ +/vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml new file mode 100644 index 000000000..8636ab42a --- /dev/null +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -0,0 +1,23 @@ +sudo: false +language: go +go_import_path: go.uber.org/multierr + +env: + global: + - GO111MODULE=on + +go: + - oldstable + - stable + +before_install: +- go version + +script: +- | + set -e + make lint + make cover + +after_success: +- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 000000000..6f1db9ef4 --- /dev/null +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,60 @@ +Releases +======== + +v1.6.0 (2020-09-14) +=================== + +- 
Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + +v1.1.0 (2017-06-30) +=================== + +- Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. This release is committing to making no breaking +changes to the current API in the 1.X series. + + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-31-03) +=================== + +- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 000000000..858e02475 --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 000000000..316004400 --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,42 @@ +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: golint +golint: + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... + +.PHONY: staticcheck +staticcheck: + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... + +.PHONY: lint +lint: gofmt golint staticcheck + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg=./... -v ./... 
+ go tool cover -html=cover.out -o cover.html + +update-license: + @cd tools && go install go.uber.org/tools/update-license + @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 000000000..751bd65e5 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,23 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Installation + + go get -u go.uber.org/multierr + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg +[doc]: https://godoc.org/go.uber.org/multierr +[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://travis-ci.com/uber-go/multierr +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 000000000..5c9b67d53 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,449 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// This makes it possible to record resource cleanup failures from deferred +// blocks with the help of named return values. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. 
+// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync" + + "go.uber.org/atomic" +) + +var ( + // Separator for single-line error messages. + _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. 
+func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. 
+// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a verison that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. 
+ panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml new file mode 100644 index 000000000..6ef084ec2 --- /dev/null +++ b/vendor/go.uber.org/multierr/glide.yaml @@ -0,0 +1,8 @@ +package: go.uber.org/multierr +import: +- package: go.uber.org/atomic + version: ^1 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go new file mode 100644 index 000000000..264b0eac0 --- /dev/null +++ b/vendor/go.uber.org/multierr/go113.go @@ -0,0 +1,52 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build go1.13 + +package multierr + +import "errors" + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. 
+func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 000000000..8e5ca7d3e --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 000000000..da9d9d00b --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 000000000..3154a1e64 --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,109 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. 
It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.com/uber-go/zap +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock + diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 000000000..3b99bf0ac --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,460 @@ +# Changelog + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. + +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. 
+ +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. +* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. 
+ +Thanks to @richard-tunein and @pavius for their contributions to this release. + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. 
This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. 
+ +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 +[#487]: https://github.com/uber-go/zap/pull/487 +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 +[#504]: https://github.com/uber-go/zap/pull/504 +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 +[#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 +[#751]: https://github.com/uber-go/zap/pull/751 +[#758]: https://github.com/uber-go/zap/pull/758 +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: 
https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..e327d9aa5 --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. 
Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 000000000..5cd965687 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,75 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. 
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 000000000..b183b20bc
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
+### Why are some of my logs missing?
+
+Zap drops logs intentionally when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which
+causes repeated logs within the same second to be sampled. See
+[Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs)
+for more details on why sampling is enabled.
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+ +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. + +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. 
+ +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 000000000..6652bed45 --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 000000000..9b1bc3b0e --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,73 @@ +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +# +# We track coverage only for the main module. +MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test + +# Many Go tools take file globs or directories as arguments instead of packages. +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: all +all: lint test + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking lint..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking staticcheck..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./checklicense.sh | tee -a lint.log + @[ ! -s lint.log ] + @echo "Checking 'go mod tidy'..." + @make tidy + @if ! 
git diff --quiet; then \ + echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ + git --no-pager diff; \ + fi + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 000000000..1e64d6cff --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,134 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions)
+
+Log a message and 10 fields:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 862 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
+| zerolog | 4021 ns/op | +366% | 76 allocs/op
+| go-kit | 4542 ns/op | +427% | 105 allocs/op
+| apex/log | 26785 ns/op | +3007% | 115 allocs/op
+| logrus | 29501 ns/op | +3322% | 125 allocs/op
+| log15 | 29906 ns/op | +3369% | 122 allocs/op
+
+Log a message with a logger that already has 10 fields of context:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 126 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
+| zerolog | 88 ns/op | -30% | 0 allocs/op
+| go-kit | 5087 ns/op | +3937% | 103 allocs/op
+| log15 | 18548 ns/op | +14621% | 73 allocs/op
+| apex/log | 26012 ns/op | +20544% | 104 allocs/op
+| logrus | 27236 ns/op | +21516% | 113 allocs/op
+
+Log a static string, without any context or `printf`-style templating:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 118 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
+| zerolog | 93 ns/op | -21% | 0 allocs/op
+| go-kit | 280 ns/op | +137% | 11 allocs/op
+| standard library | 499 ns/op | +323% | 2 allocs/op
+| apex/log | 1990 ns/op | +1586% | 10 allocs/op
+| logrus | 3129 ns/op | +2552% | 24 allocs/op
+| log15 | 3887 ns/op | +3194% | 23 allocs/op
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 000000000..5be3704a3 --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 000000000..3f4b86e08 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import ( + "strconv" + "time" +) + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. 
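
A short sketch of the intended Buffer lifecycle, using the Pool added alongside this file; printing with fmt is only for illustration:

package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	pool := buffer.NewPool()

	buf := pool.Get() // comes back reset, initially backed by a 1 KiB slice
	buf.AppendString("status=")
	buf.AppendInt(200)
	buf.AppendByte('\n')

	buf.TrimNewline()
	fmt.Println(buf.String(), buf.Len())

	buf.Free() // hand the buffer back to the pool; keep no references after this
}
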
+func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 000000000..8fb3e202c --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 000000000..345ac8b89 --- /dev/null +++ b/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 000000000..55637fb0b --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,264 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. 
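
A sketch of how SamplingConfig and its Hook plug into a logger built via Config.Build (defined later in this file); the counter, message, and Initial/Thereafter values are only illustrative:

package main

import (
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped int64

	cfg := zap.NewProductionConfig()
	cfg.Sampling = &zap.SamplingConfig{
		Initial:    5,   // per second, the first 5 entries with the same message pass through
		Thereafter: 100, // then every 100th
		Hook: func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				atomic.AddInt64(&dropped, 1)
			}
		},
	}

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	for i := 0; i < 1000; i++ {
		logger.Info("hot loop")
	}
	logger.Warn("sampler dropped entries", zap.Int64("dropped", atomic.LoadInt64(&dropped)))
}
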
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. 
+func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. +func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, fmt.Errorf("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 000000000..8638dd1b9 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
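
A minimal sketch of driving Build from a tweaked production preset; the service name and output path are placeholders:

package main

import "go.uber.org/zap"

func main() {
	cfg := zap.NewProductionConfig()
	cfg.Level.SetLevel(zap.DebugLevel) // AtomicLevel: applies to every logger built from cfg
	cfg.OutputPaths = []string{"stdout"}
	cfg.InitialFields = map[string]interface{}{"service": "example-svc"}

	logger, err := cfg.Build() // wires encoder, sinks, sampling, and options together
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Debug("logger built from Config")
}
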
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. 
See the package-level AdvancedConfiguration +// example for sample code. +// +// Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 000000000..08ed83354 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. 
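
A sketch of registering a custom encoding so Config.Encoding can refer to it by name; the "console-caps" name and the level-encoder tweak are made up for illustration:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Typically done once, e.g. from an init function.
	if err := zap.RegisterEncoder("console-caps", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
		cfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
		return zapcore.NewConsoleEncoder(cfg), nil
	}); err != nil {
		panic(err) // empty or duplicate name
	}

	cfg := zap.NewDevelopmentConfig()
	cfg.Encoding = "console-caps"
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("custom encoding in use")
}
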
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 000000000..65982a51e --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. 
To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 000000000..bbb745db5 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,549 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
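
How the error helpers above look in use, assuming zap.NewExample; the error text is a placeholder, and nil elements in the slice are skipped by errArray as shown above:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	err := errors.New("dial tcp 10.0.0.1:443: connection refused")
	logger.Warn("upstream call failed",
		zap.Error(err),                   // stored under the key "error"
		zap.NamedError("lastError", err), // same behavior, custom key
		zap.Errors("attempts", []error{err, nil, errors.New("timeout")}), // nil elements are skipped
	)
}
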
+func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
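
The *p constructors above all share the same nil check; a short sketch of the difference that makes, with placeholder keys:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	var shard *int // nil: logged as an explicit null rather than dereferenced
	region := "us-east-1"

	logger.Info("request context",
		zap.Intp("shard", shard),
		zap.Stringp("region", &region),
	)
}
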
+func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. 
Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
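
A sketch of Any's dispatch in practice, including the byte/uint8 caveat called out above; keys and values are placeholders:

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("dynamic fields",
		zap.Any("retryIn", 3*time.Second),            // concrete Duration field
		zap.Any("payload", []byte("raw bytes")),      // treated as a Binary blob
		zap.Any("flags", map[string]bool{"x": true}), // falls back to Reflect
	)
}
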
+func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case *bool: + return Boolp(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case *complex128: + return Complex128p(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case *complex64: + return Complex64p(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case *float64: + return Float64p(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case *float32: + return Float32p(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case *int: + return Intp(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case *int64: + return Int64p(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case *int32: + return Int32p(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case *int16: + return Int16p(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case *int8: + return Int8p(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case *string: + return Stringp(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case *uint: + return Uintp(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case *uint64: + return Uint64p(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case *uint32: + return Uint32p(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case *uint16: + return Uint16p(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case *uint8: + return Uint8p(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case *uintptr: + return Uintptrp(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case *time.Time: + return Timep(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case *time.Duration: + return Durationp(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 000000000..131287507 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 000000000..8e1d05e9a --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 000000000..c1ac0507c --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,168 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
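
A sketch of wiring LevelFlag into a config-built logger; the flag name is arbitrary:

package main

import (
	"flag"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var logLevel = zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")

func main() {
	flag.Parse()

	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(*logLevel)

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("level taken from the -log-level flag")
}
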
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. 
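
How the global logger and the standard-library redirection above are typically combined; the restore functions are deferred so the previous state comes back on exit:

package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	undoGlobals := zap.ReplaceGlobals(logger) // zap.L() and zap.S() now use logger
	defer undoGlobals()
	undoStd := zap.RedirectStdLog(logger) // stdlib log output captured at InfoLevel
	defer undoStd()

	zap.L().Info("via the global Logger")
	zap.S().Infow("via the global SugaredLogger", "key", "value")
	log.Print("via the standard library logger")
}
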
+// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go new file mode 100644 index 000000000..6b5dbda80 --- /dev/null +++ b/vendor/go.uber.org/zap/global_go112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build go1.12 + +package zap + +const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go new file mode 100644 index 000000000..d3ab9af93 --- /dev/null +++ b/vendor/go.uber.org/zap/global_prego112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build !go1.12 + +package zap + +const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 000000000..1297c33b3 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// GET +// +// The GET request returns a JSON description of the current logging level like: +// {"level":"info"} +// +// PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. 
+// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +// +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + enc.Encode(errorResponse{Error: err.Error()}) + return + } + lvl.SetLevel(requestedLvl) + enc.Encode(payload{Level: lvl.Level()}) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, fmt.Errorf("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, fmt.Errorf("must specify logging level") + } + return *pld.Level, nil + +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 000000000..dad583aaa --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. 
Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 000000000..c4d5d02ab --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 000000000..dfc5b05fe --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). 
+package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 000000000..3567a9a1e --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. 
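The LevelEnablerFunc adapter documented above (and declared immediately below, where the vendored source resumes) is the lightest way to gate a core on a custom predicate. A minimal usage sketch, not part of the vendored file; it assumes only the public zap and zapcore packages plus the NewProductionEncoderConfig helper from zap's config.go, which is outside this hunk:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// An anonymous predicate satisfies zapcore.LevelEnabler via LevelEnablerFunc:
	// here, only error-level entries and above are enabled.
	errorsOnly := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
		return lvl >= zapcore.ErrorLevel
	})

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), errorsOnly)

	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("dropped by the enabler") // below ErrorLevel, never written
	logger.Error("written to stderr as JSON")
}
```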
+type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 000000000..553f258e7 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,345 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onFatal zapcore.CheckWriteAction // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. 
Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. 
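Before the remaining leveled methods, a short sketch of how the With and Check methods above are typically combined. Illustrative only; zap.String, zap.Int, and zap.Duration are field constructors from zap's field.go, which is outside this hunk:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction() // error handling elided for brevity
	defer logger.Sync()

	// With returns a child logger; its fields ride along on every entry.
	reqLogger := logger.With(zap.String("request_id", "abc123"))

	// Check skips building the field slice entirely when DebugLevel is disabled.
	if ce := reqLogger.Check(zap.DebugLevel, "cache lookup"); ce != nil {
		ce.Write(zap.Duration("elapsed", 42*time.Millisecond))
	}

	reqLogger.Info("request handled", zap.Int("status", 200))
}
```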
+func (log *Logger) Panic(msg string, fields ...Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// check must always be called directly by a method in the Logger interface
+	// (e.g., Check, Info, Fatal).
+	const callerSkipOffset = 2
+
+	// Check the level first to reduce the cost of disabled log calls.
+	// Since Panic and higher may exit, we skip the optimization for those levels.
+	if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+		return nil
+	}
+
+	// Create basic checked entry thru the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       time.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.Should(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		onFatal := log.onFatal
+		// Noop is the default value for CheckWriteAction, and it leads to
+		// continued execution after a Fatal which is unexpected.
+		if onFatal == zapcore.WriteThenNoop {
+			onFatal = zapcore.WriteThenFatal
+		}
+		ce = ce.Should(ent, onFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.Should(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+	if log.addCaller {
+		frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
+		if !defined {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+			log.errorOutput.Sync()
+		}
+
+		ce.Entry.Caller = zapcore.EntryCaller{
+			Defined:  defined,
+			PC:       frame.PC,
+			File:     frame.File,
+			Line:     frame.Line,
+			Function: frame.Function,
+		}
+	}
+	if log.addStack.Enabled(ce.Entry.Level) {
+		ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+	}
+
+	return ce
+}
+
+// getCallerFrame gets caller frame. The argument skip is the number of stack
+// frames to ascend, with 0 identifying the caller of getCallerFrame. The
+// boolean ok is false if it was not possible to recover the information.
+//
+// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
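The callerSkip bookkeeping in check above (and the getCallerFrame helper whose definition follows right after this aside) is exactly what AddCallerSkip adjusts when zap sits behind a wrapper. A hedged sketch of such a wrapper; the logutil package and Wrapper type are illustrative names only:

```go
// Package logutil is a hypothetical wrapper around zap, shown only to
// illustrate why callerSkip matters.
package logutil

import "go.uber.org/zap"

// Wrapper forwards to zap. Without AddCallerSkip(1), every entry would be
// attributed to this file, because Logger.check adds its fixed
// callerSkipOffset on top of the logger's callerSkip.
type Wrapper struct {
	l *zap.Logger
}

// New wraps an existing logger with caller annotation that points at the
// wrapper's caller rather than at the wrapper itself.
func New(l *zap.Logger) *Wrapper {
	return &Wrapper{l: l.WithOptions(zap.AddCaller(), zap.AddCallerSkip(1))}
}

// Info forwards a structured entry through the wrapper.
func (w *Wrapper) Info(msg string, fields ...zap.Field) {
	w.l.Info(msg, fields...)
}
```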
+func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { + const skipOffset = 2 // skip getCallerFrame and Callers + + pc := make([]uintptr, 1) + numFrames := runtime.Callers(skip+skipOffset, pc) + if numFrames < 1 { + return + } + + frame, _ = runtime.CallersFrames(pc).Next() + return frame, frame.PC != 0 +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 000000000..0135c2092 --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. 
+func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. +func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { + return optionFunc(func(log *Logger) { + log.addCaller = enabled + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// OnFatal sets the action to take on fatal logs. +func OnFatal(action zapcore.CheckWriteAction) Option { + return optionFunc(func(log *Logger) { + log.onFatal = action + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 000000000..df46fa87a --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
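The sink.go file added next keeps a registry of sink factories keyed by URL scheme, and RegisterSink (defined below, where the vendored source resumes) extends it. A hedged sketch of registering a custom in-memory sink; memorySink is an illustrative type, and Config/Build come from zap's config.go, outside this hunk:

```go
package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memorySink satisfies zap.Sink: bytes.Buffer supplies Write, and Sync/Close
// are no-ops.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Sync() error  { return nil }
func (*memorySink) Close() error { return nil }

func main() {
	sink := &memorySink{}

	// Register once, early in the program; re-registering a scheme errors.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("captured in memory")
	_ = logger.Sync()
	// sink.String() now contains the JSON-encoded entry.
}
```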
+ +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' 
|| c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 000000000..0cf8c1ddf --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,85 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "sync" + + "go.uber.org/zap/internal/bufferpool" +) + +var ( + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +func takeStacktrace(skip int) string { + buffer := bufferpool.Get() + defer buffer.Free() + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var numFrames int + for { + // Skip the call to runtime.Callers and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + numFrames = runtime.Callers(skip+2, programCounters.pcs) + if numFrames < len(programCounters.pcs) { + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if i != 0 { + buffer.AppendByte('\n') + } + i++ + buffer.AppendString(frame.Function) + buffer.AppendByte('\n') + buffer.AppendByte('\t') + buffer.AppendString(frame.File) + buffer.AppendByte(':') + buffer.AppendInt(int64(frame.Line)) + } + + return buffer.String() +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 000000000..4084dada7 --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,315 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes three methods: one for loosely-typed +// structured logging, one for println-style formatting, and one for +// printf-style formatting. For example, SugaredLoggers can produce InfoLevel +// output with Infow ("info with" structured context), Info, or Infof. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. 
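A short sketch of the sugared API documented above, mixing loosely-typed pairs with a strongly-typed Field; the With method itself is defined immediately below, and NewDevelopment appears earlier in this hunk:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment() // error handling elided for brevity
	defer logger.Sync()

	sugar := logger.Sugar().With(
		"service", "checkout", // loosely-typed key-value pair
		zap.String("region", "us-east-1"), // strongly-typed Field is also accepted
	)

	sugar.Infow("order placed", "order_id", 42, "total", 19.99)
	sugar.Infof("processed in %dms", 37)

	// Desugar recovers the structured Logger when allocations matter again.
	sugar.Desugar().Info("back to structured logging")
}
```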
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. 
The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. 
+ if len(invalid) > 0 { + s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 000000000..c5a1f1622 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 000000000..86a709ab0 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,99 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
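writer.go, which follows, exposes Open for turning plain paths, "stdout"/"stderr", and registered URL schemes into a single locked WriteSyncer. A hedged sketch; the /tmp/service.log path is illustrative, and NewProductionEncoderConfig comes from zap's config.go, outside this hunk:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Open combines every destination into one WriteSyncer and returns a
	// cleanup function that closes any files it opened.
	ws, cleanup, err := zap.Open("stderr", "/tmp/service.log")
	if err != nil {
		panic(err)
	}
	defer cleanup()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zap.InfoLevel, // a plain zapcore.Level works as a LevelEnabler
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("written to both stderr and /tmp/service.log")
}
```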
+ +package zap + +import ( + "fmt" + "io" + "io/ioutil" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return writers, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 000000000..2307af404 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. 
+ if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + if c.LineEnding != "" { + line.AppendString(c.LineEnding) + } else { + line.AppendString(DefaultLineEnding) + } + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 000000000..a1ef8b034 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. 
+func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 000000000..31000e91f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 000000000..6601ca166 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,443 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"encoding/json"
+	"time"
+
+	"go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToLowercaseColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.String())
+	}
+	enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToCapitalColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.CapitalString())
+	}
+	enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "capital":
+		*e = CapitalLevelEncoder
+	case "capitalColor":
+		*e = CapitalColorLevelEncoder
+	case "color":
+		*e = LowercaseColorLevelEncoder
+	default:
+		*e = LowercaseLevelEncoder
+	}
+	return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	sec := float64(nanos) / float64(time.Second)
+	enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	millis := float64(nanos) / float64(time.Millisecond)
+	enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+	type appendTimeEncoder interface {
+		AppendTimeLayout(time.Time, string)
+	}
+
+	if enc, ok := enc.(appendTimeEncoder); ok {
+		enc.AppendTimeLayout(t, layout)
+		return
+	}
+
+	enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder that serializes a time.Time using
+// the given layout.
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+	return func(t time.Time, enc PrimitiveArrayEncoder) {
+		encodeTimeLayout(t, layout, enc)
+	}
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosTimeEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "rfc3339nano", "RFC3339Nano":
+		*e = RFC3339NanoTimeEncoder
+	case "rfc3339", "RFC3339":
+		*e = RFC3339TimeEncoder
+	case "iso8601", "ISO8601":
+		*e = ISO8601TimeEncoder
+	case "millis":
+		*e = EpochMillisTimeEncoder
+	case "nanos":
+		*e = EpochNanosTimeEncoder
+	default:
+		*e = EpochTimeEncoder
+	}
+	return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If value is an object with a "layout" field, it will be unmarshaled to a TimeEncoder with the given layout.
+//     timeEncoder:
+//         layout: 06/01/02 03:04pm
+// If value is a string, it uses UnmarshalText.
+//     timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var o struct {
+		Layout string `json:"layout" yaml:"layout"`
+	}
+	if err := unmarshal(&o); err == nil {
+		*e = TimeEncoderOfLayout(o.Layout)
+		return nil
+	}
+
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+	return e.UnmarshalYAML(func(v interface{}) error {
+		return json.Unmarshal(data, v)
+	})
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" to NanosDurationEncoder, "ms" to
+// MillisDurationEncoder, and anything else to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "string":
+		*e = StringDurationEncoder
+	case "nanos":
+		*e = NanosDurationEncoder
+	case "ms":
+		*e = MillisDurationEncoder
+	default:
+		*e = SecondsDurationEncoder
+	}
+	return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "full":
+		*e = FullCallerEncoder
+	default:
+		*e = ShortCallerEncoder
+	}
+	return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
+	enc.AppendString(loggerName)
+}
+
+// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
+// unmarshaled to FullNameEncoder.
+func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. + AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. 
+ OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 000000000..4aa8b4f90 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,264 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + + "go.uber.org/multierr" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. 
Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if ce.ErrorOutput != nil { + if err != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + ce.ErrorOutput.Sync() + } + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + case WriteThenGoexit: + runtime.Goexit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 000000000..f2a07d786 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,132 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+//   {
+//     "error": err.Error(),
+//     "errorVerbose": fmt.Sprintf("%+v", err),
+//     "errorCauses": [
+//       ...
+//     ],
+//   }
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the Error() method
+	defer func() {
+		if rerr := recover(); rerr != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are an
+			// error that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", rerr)
+		}
+	}()
+
+	basic := err.Error()
+	enc.AddString(key, basic)
+
+	switch e := err.(type) {
+	case errorGroup:
+		return enc.AddArray(key+"Causes", errArray(e.Errors()))
+	case fmt.Formatter:
+		verbose := fmt.Sprintf("%+v", e)
+		if verbose != basic {
+			// This is a rich error type, like those produced by
+			// github.com/pkg/errors.
+			enc.AddString(key+"Verbose", verbose)
+		}
+	}
+	return nil
+}
+
+type errorGroup interface {
+	// Provides read-only access to the underlying list of errors, preferably
+	// without causing any allocs.
+	Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+	for i := range errs {
+		if errs[i] == nil {
+			continue
+		}
+
+		el := newErrArrayElem(errs[i])
+		arr.AppendObject(el)
+		el.Free()
+	}
+	return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+	return &errArrayElem{}
+}}
+
+// Encodes any error into a {"error": ...} re-using the same errors logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+	e := _errArrayElemPool.Get().(*errArrayElem)
+	e.err = err
+	return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+	return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+	return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+	e.err = nil
+	_errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 000000000..95bdb0a12
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+	// UnknownType is the default field type. Attempting to add it to an encoder will panic.
+	UnknownType FieldType = iota
+	// ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+	ArrayMarshalerType
+	// ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+	ObjectMarshalerType
+	// BinaryType indicates that the field carries an opaque binary blob.
+	BinaryType
+	// BoolType indicates that the field carries a bool.
+	BoolType
+	// ByteStringType indicates that the field carries UTF-8 encoded bytes.
+	ByteStringType
+	// Complex128Type indicates that the field carries a complex128.
+	Complex128Type
+	// Complex64Type indicates that the field carries a complex64.
+	Complex64Type
+	// DurationType indicates that the field carries a time.Duration.
+	DurationType
+	// Float64Type indicates that the field carries a float64.
+	Float64Type
+	// Float32Type indicates that the field carries a float32.
+ Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. + TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. 
+			enc.AddTime(f.Key, time.Unix(0, f.Integer))
+		}
+	case TimeFullType:
+		enc.AddTime(f.Key, f.Interface.(time.Time))
+	case Uint64Type:
+		enc.AddUint64(f.Key, uint64(f.Integer))
+	case Uint32Type:
+		enc.AddUint32(f.Key, uint32(f.Integer))
+	case Uint16Type:
+		enc.AddUint16(f.Key, uint16(f.Integer))
+	case Uint8Type:
+		enc.AddUint8(f.Key, uint8(f.Integer))
+	case UintptrType:
+		enc.AddUintptr(f.Key, uintptr(f.Integer))
+	case ReflectType:
+		err = enc.AddReflected(f.Key, f.Interface)
+	case NamespaceType:
+		enc.OpenNamespace(f.Key)
+	case StringerType:
+		err = encodeStringer(f.Key, f.Interface, enc)
+	case ErrorType:
+		err = encodeError(f.Key, f.Interface.(error), enc)
+	case SkipType:
+		break
+	default:
+		panic(fmt.Sprintf("unknown field type: %v", f))
+	}
+
+	if err != nil {
+		enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+	}
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+	if f.Type != other.Type {
+		return false
+	}
+	if f.Key != other.Key {
+		return false
+	}
+
+	switch f.Type {
+	case BinaryType, ByteStringType:
+		return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+	case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+		return reflect.DeepEqual(f.Interface, other.Interface)
+	default:
+		return f == other
+	}
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+	for i := range fields {
+		fields[i].AddTo(enc)
+	}
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the String() method, similar to https://golang.org/src/fmt/print.go#L540
+	defer func() {
+		if err := recover(); err != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+			// Stringer that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", err)
+		}
+	}()
+
+	enc.AddString(key, stringer.(fmt.Stringer).String())
+	return nil
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 000000000..5db4afb30
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 000000000..5a1749261 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,66 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. 
+func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 000000000..5cf7d917e --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,534 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "encoding/json" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. 
Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + + // For consistency with our custom JSON encoder. + enc.reflectEnc.SetEscapeHTML(false) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. + enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. 
+ enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" { + final.addKey(final.LevelKey) + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. 
+ final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(DefaultLineEnding) + } + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. 
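
// A minimal sketch of how the JSON encoder above is typically reached in
// practice: through zapcore.NewJSONEncoder and a Core, rather than by calling
// EncodeEntry directly. The encoder config and the logged field values are
// illustrative, not part of the vendored sources.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewProductionEncoderConfig() // supplies MessageKey, LevelKey, EncodeTime, ...
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(encCfg), // the jsonEncoder implemented above
		zapcore.Lock(os.Stdout),        // mutex-guarded WriteSyncer
		zapcore.InfoLevel,              // LevelEnabler
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("encoded as JSON", zap.String("user", "ada"), zap.Int("attempt", 3))
}
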
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 000000000..e575c9f43 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,175 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// String returns a lower-case ASCII representation of the log level. 
+func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 000000000..7af8dadcb --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
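
// Level satisfies encoding.TextUnmarshaler (and, via String and Set, it can be
// used as a flag.Value), so it is easily driven by plain configuration text.
// A small sketch; the LOG_LEVEL environment variable is an assumption, not
// something defined by this change.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap/zapcore"
)

func main() {
	lvl := zapcore.InfoLevel // "" and "info" both unmarshal to InfoLevel
	if s := os.Getenv("LOG_LEVEL"); s != "" {
		if err := lvl.UnmarshalText([]byte(s)); err != nil {
			fmt.Fprintf(os.Stderr, "bad LOG_LEVEL %q: %v\n", s, err)
			os.Exit(1)
		}
	}

	// A Level is itself a LevelEnabler: it admits its own level and above.
	fmt.Println("debug enabled:", lvl.Enabled(zapcore.DebugLevel))
	fmt.Println("error enabled:", lvl.Enabled(zapcore.ErrorLevel))
}
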
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 000000000..c3c55ba0d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. 
+type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 000000000..dfead0829 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
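
// A user-defined type can implement ObjectMarshaler and be exercised against
// the map-backed encoder above, which is the testing aid it is intended to be.
// The User type and its fields below are hypothetical.

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

type User struct {
	Name  string
	Admin bool
}

// MarshalLogObject adds only the fields that are safe to log.
func (u User) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	enc.AddBool("admin", u.Admin)
	return nil
}

func main() {
	u := User{Name: "ada", Admin: true}
	enc := zapcore.NewMapObjectEncoder()
	if err := u.MarshalLogObject(enc); err != nil {
		panic(err)
	}
	fmt.Println(enc.Fields) // map[admin:true name:ada]

	// With a full logger the same type would be attached via zap.Object("user", u).
}
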
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 000000000..25f10ca1d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,208 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. 
+func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 000000000..07a32eef9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. 
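
// Wiring the sampler implemented above around an existing Core. The tick,
// first/thereafter values, and the dropped counter are illustrative; any
// zapcore.Core can sit underneath.

package main

import (
	"fmt"
	"os"
	"time"

	"go.uber.org/atomic"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// Per tick and per (level, message) pair: keep the first 10 entries,
	// then every 100th, and count everything that gets dropped.
	sampled := zapcore.NewSamplerWithOptions(core, time.Second, 10, 100,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Inc()
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 1000; i++ {
		logger.Info("hot path") // identical message, so most entries are sampled away
	}
	_ = logger.Sync()
	fmt.Printf("dropped %d entries\n", dropped.Load())
}
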
+func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 000000000..d4a1af3d0 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. 
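
// NewTee composes naturally with the WriteSyncer helpers in this file: each
// destination gets its own Core and the tee fans every entry out to all of
// them. The file path and the level split are illustrative.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Debug and above to the file; warnings and above to stderr as well.
	fileCore := zapcore.NewCore(enc, zapcore.AddSync(f), zapcore.DebugLevel)
	stderrCore := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.WarnLevel)

	logger := zap.New(zapcore.NewTee(fileCore, stderrCore))
	defer logger.Sync()

	logger.Warn("goes to both outputs")
	logger.Debug("file only")
}
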
+func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0f443e693..8cfd6063e 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -16,15 +16,16 @@ Or you can manually git clone the repository to See godoc for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) +* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) ## Policy for new packages -We no longer accept new provider-specific packages in this repo. For -defining provider endpoints and provider-specific OAuth2 behavior, we -encourage you to create packages elsewhere. We'll keep the existing -packages for compatibility. +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +package. 
## Report Issues / Send Patches diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod deleted file mode 100644 index b34578155..000000000 --- a/vendor/golang.org/x/oauth2/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module golang.org/x/oauth2 - -go 1.11 - -require ( - cloud.google.com/go v0.34.0 - golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - google.golang.org/appengine v1.4.0 -) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum deleted file mode 100644 index 6f0079b0d..000000000 --- a/vendor/golang.org/x/oauth2/go.sum +++ /dev/null @@ -1,12 +0,0 @@ -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index 87ae9d2a3..c9b69937a 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.5 // +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index c07c798bc..98bf56b73 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.5 // +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 42edd93ef..62377d2ff 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 && race // +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index c89cf8fc0..f8da30876 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build plan9 && !race // +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 4f7f9ad7c..55fa8d025 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index e7363a2f5..602473cba 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 // Package plan9 contains an interface to the low-level operating system diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go index 84e147148..723b1f400 100644 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -132,8 +132,10 @@ func Pipe(p []int) (err error) { } var pp [2]int32 err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 6819bc209..3f40b9bd7 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && 386 // +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 418abbbfc..0e6a96aa4 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && amd64 // +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 3e8a1a58c..244c501b7 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && arm // +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 474efad0e..7d3c060e1 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -149,7 +149,7 @@ To add a constant, add the header that includes it to the appropriate variable. Then, edit the regex (if necessary) to match the desired constant. Avoid making the regex too broad to avoid matching unintended constants. 
-### mkmerge.go +### internal/mkmerge This program is used to extract duplicate const, func, and type declarations from the generated architecture-specific files listed below, and merge these diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 396aadf86..ee7362348 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index a74ef58f8..a47b035f9 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -239,6 +239,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -260,6 +261,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -520,7 +522,7 @@ ccflags="$@" $2 ~ /^HW_MACHINE$/ || $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && - $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || @@ -605,6 +607,7 @@ ccflags="$@" $2 ~ /^MTD/ || $2 ~ /^OTP/ || $2 ~ /^MEM/ || + $2 ~ /^WG/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 8bf457059..5f63147e0 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -34,3 +34,52 @@ func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) return &ucred, nil } + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} + +// ParseOrigDstAddr decodes a socket control message containing the original +// destination address. To receive such a message the IP_RECVORIGDSTADDR or +// IPV6_RECVORIGDSTADDR option must be enabled on the socket. 
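
// A Linux-only sketch of the new ParseOrigDstAddr helper on a UDP socket that
// receives redirected traffic. The port, buffer sizes, and the abbreviated
// error handling are illustrative.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Ask the kernel to attach IP_ORIGDSTADDR control messages to each datagram.
	if err := unix.SetsockoptInt(fd, unix.SOL_IP, unix.IP_RECVORIGDSTADDR, 1); err != nil {
		panic(err)
	}
	if err := unix.Bind(fd, &unix.SockaddrInet4{Port: 5300}); err != nil {
		panic(err)
	}

	buf := make([]byte, 1500)
	oob := make([]byte, 1024)
	n, oobn, _, from, err := unix.Recvmsg(fd, buf, oob, 0)
	if err != nil {
		panic(err)
	}

	scms, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		panic(err)
	}
	for i := range scms {
		if dst, err := unix.ParseOrigDstAddr(&scms[i]); err == nil {
			fmt.Printf("%d bytes from %v, originally addressed to %v\n", n, from, dst)
		}
	}
}
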
+func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) { + switch { + case m.Header.Level == SOL_IP && m.Header.Type == IP_ORIGDSTADDR: + pp := (*RawSockaddrInet4)(unsafe.Pointer(&m.Data[0])) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.Addr = pp.Addr + return sa, nil + + case m.Header.Level == SOL_IPV6 && m.Header.Type == IPV6_ORIGDSTADDR: + pp := (*RawSockaddrInet6)(unsafe.Pointer(&m.Data[0])) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + sa.Addr = pp.Addr + return sa, nil + + default: + return nil, EINVAL + } +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index d8efb715f..4f55c8d99 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -70,9 +70,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -85,9 +83,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -261,9 +257,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -272,9 +266,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -385,6 +377,11 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range +func Fsync(fd int) error { + return fsyncRange(fd, O_SYNC, 0, 0) +} + /* * Direct access */ @@ -401,7 +398,6 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Fsync(fd int) (err error) // readdir_r //sysnb Getpgid(pid int) (pgid int, err error) @@ -523,8 +519,10 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 95ac3946b..0ce452326 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -163,9 +163,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] 
= sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -179,9 +177,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -210,9 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Nlen = sa.Nlen sa.raw.Alen = sa.Alen sa.raw.Slen = sa.Slen - for i := 0; i < len(sa.raw.Data); i++ { - sa.raw.Data[i] = sa.Data[i] - } + sa.raw.Data = sa.Data return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } @@ -228,9 +222,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Nlen = pp.Nlen sa.Alen = pp.Alen sa.Slen = pp.Slen - for i := 0; i < len(sa.Data); i++ { - sa.Data[i] = pp.Data[i] - } + sa.Data = pp.Data return sa, nil case AF_UNIX: @@ -262,9 +254,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -273,9 +263,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return anyToSockaddrGOOS(fd, rsa) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index a8c13317d..0eaab9131 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -159,8 +159,10 @@ func Pipe(p []int) (err error) { } var x [2]int32 err = pipe(&x) - p[0] = int(x[0]) - p[1] = int(x[1]) + if err == nil { + p[0] = int(x[0]) + p[1] = int(x[1]) + } return } @@ -430,8 +432,25 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } -func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { - mib, err := sysctlmib(name) +func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + var kinfo KinfoProc + n := uintptr(SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofKinfoProc { + return nil, EIO + } + return &kinfo, nil +} + +func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { + mib, err := sysctlmib(name, args...) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 5af108a50..2e37c3167 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -101,7 +101,10 @@ func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } - p[0], p[1], err = pipe() + r, w, err := pipe() + if err == nil { + p[0], p[1] = r, w + } return } @@ -114,7 +117,10 @@ func Pipe2(p []int, flags int) (err error) { var pp [2]_C_int // pipe2 on dragonfly takes an fds array as an argument, but still // returns the file descriptors. 
- p[0], p[1], err = pipe2(&pp, flags) + r, w, err := pipe2(&pp, flags) + if err == nil { + p[0], p[1] = r, w + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 18c392cf3..2f650ae66 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -110,8 +110,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fff38a84c..f432b0684 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -131,8 +131,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -372,9 +374,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -387,9 +387,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -438,9 +436,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Hatype = sa.Hatype sa.raw.Pkttype = sa.Pkttype sa.raw.Halen = sa.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil } @@ -855,12 +851,10 @@ func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Addr == nil { return nil, 0, EINVAL } - sa.raw.Family = AF_TIPC sa.raw.Scope = int8(sa.Scope) sa.raw.Addrtype = sa.Addr.tipcAddrtype() sa.raw.Addr = sa.Addr.tipcAddr() - return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil } @@ -874,9 +868,7 @@ type SockaddrL2TPIP struct { func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET sa.raw.Conn_id = sa.ConnId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil } @@ -892,9 +884,7 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET6 sa.raw.Conn_id = sa.ConnId sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil } @@ -990,9 +980,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Hatype = pp.Hatype sa.Pkttype = pp.Pkttype sa.Halen = pp.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_UNIX: @@ -1031,18 +1019,14 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa)) sa := new(SockaddrL2TPIP) sa.ConnId = pp.Conn_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] 
= pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1058,9 +1042,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrL2TPIP6) sa.ConnId = pp.Conn_id sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) @@ -1068,9 +1050,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1797,6 +1777,16 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri return mount(source, target, fstype, flags, datap) } +//sys mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) = SYS_MOUNT_SETATTR + +// MountSetattr is a wrapper for mount_setattr(2). +// https://man7.org/linux/man-pages/man2/mount_setattr.2.html +// +// Requires kernel >= 5.12. +func MountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr) error { + return mountSetattr(dirfd, pathname, flags, attr, unsafe.Sizeof(*attr)) +} + func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 853d5f0f4..696fed496 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,14 +110,8 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (fd1 int, fd2 int, err error) - func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - p[0], p[1], err = pipe() - return + return Pipe2(p, 0) } //sysnb pipe2(p *[2]_C_int, flags int) (err error) @@ -128,8 +122,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 22b550385..11b1d419d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -87,8 +87,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index d2a6495c7..5c813921e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -66,8 +66,10 @@ func Pipe(p []int) (err error) { if n != 0 { return err } - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return nil } @@ -79,8 +81,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = 
int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -92,9 +96,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -107,9 +109,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -417,9 +417,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -428,9 +426,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 1ffd8bfcf..f8616f454 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -67,9 +67,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -83,9 +81,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -144,9 +140,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -155,9 +149,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -587,8 +579,10 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 78d4b85ec..bcc45d108 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -116,6 +116,7 @@ const ( ARPHRD_LAPB = 0x204 ARPHRD_LOCALTLK = 0x305 ARPHRD_LOOPBACK = 0x304 + ARPHRD_MCTP = 0x122 ARPHRD_METRICOM = 0x17 ARPHRD_NETLINK = 0x338 ARPHRD_NETROM = 0x0 @@ -472,6 +473,7 @@ const ( DM_DEV_WAIT = 0xc138fd08 DM_DIR = "mapper" DM_GET_TARGET_VERSION = 0xc138fd11 + DM_IMA_MEASUREMENT_FLAG = 0x80000 DM_INACTIVE_PRESENT_FLAG = 0x40 DM_INTERNAL_SUSPEND_FLAG = 0x40000 DM_IOCTL = 0xfd @@ -716,6 +718,7 @@ const ( ETH_P_LOOPBACK = 0x9000 ETH_P_MACSEC = 0x88e5 ETH_P_MAP = 0xf9 + ETH_P_MCTP = 0xfa ETH_P_MOBITEX = 0x15 ETH_P_MPLS_MC = 0x8848 ETH_P_MPLS_UC = 0x8847 @@ -751,6 +754,21 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EV_ABS = 0x3 + EV_CNT = 0x20 + EV_FF = 0x15 + EV_FF_STATUS = 0x17 + EV_KEY = 0x1 + EV_LED = 0x11 + EV_MAX = 0x1f + EV_MSC = 0x4 + EV_PWR = 0x16 + EV_REL = 0x2 + EV_REP = 0x14 + EV_SND = 0x12 + EV_SW = 0x5 + EV_SYN = 0x0 + EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 @@ -789,9 +807,11 @@ const ( FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EPIDFD = -0x2 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -811,6 +831,7 @@ const ( FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 + FAN_NOPIDFD = -0x1 FAN_ONDIR = 0x40000000 FAN_OPEN = 0x20 FAN_OPEN_EXEC = 0x1000 @@ -821,6 +842,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 + FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1454,6 +1476,18 @@ const ( MNT_FORCE = 0x1 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 + MOUNT_ATTR_IDMAP = 0x100000 + MOUNT_ATTR_NOATIME = 0x10 + MOUNT_ATTR_NODEV = 0x4 + MOUNT_ATTR_NODIRATIME = 0x80 + MOUNT_ATTR_NOEXEC = 0x8 + MOUNT_ATTR_NOSUID = 0x2 + MOUNT_ATTR_NOSYMFOLLOW = 0x200000 + MOUNT_ATTR_RDONLY = 0x1 + MOUNT_ATTR_RELATIME = 0x0 + MOUNT_ATTR_SIZE_VER0 = 0x20 + MOUNT_ATTR_STRICTATIME = 0x20 + MOUNT_ATTR__ATIME = 0x70 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1997,6 +2031,7 @@ const ( PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 + PR_SPEC_L1D_FLUSH = 0x2 PR_SPEC_NOT_AFFECTED = 0x0 PR_SPEC_PRCTL = 0x1 PR_SPEC_STORE_BYPASS = 0x0 @@ -2432,12 +2467,15 @@ const ( SMART_WRITE_THRESHOLDS = 0xd7 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b + SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 + SOCK_RCVBUF_LOCK = 0x2 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 + SOCK_SNDBUF_LOCK = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2788,6 +2826,13 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 + WGALLOWEDIP_A_MAX = 0x3 + WGDEVICE_A_MAX = 0x8 + WGPEER_A_MAX = 0xa + WG_CMD_MAX = 0x1 + WG_GENL_NAME = "wireguard" + WG_GENL_VERSION = 0x1 + WG_KEY_LEN = 0x20 WIN_ACKMEDIACHANGE = 0xdb WIN_CHECKPOWERMODE1 = 0xe5 WIN_CHECKPOWERMODE2 = 0x98 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 697811a46..3ca40ca7f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -5,7 +5,7 @@ // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 7d8d93bfc..ead332091 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -5,7 +5,7 @@ // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go package unix @@ -294,6 +294,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index f707d5089..39bdc9455 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -5,7 +5,7 @@ // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -300,6 +300,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3a67a9c85..9aec987db 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -5,7 +5,7 @@ // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go package unix @@ -290,6 +290,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index a7ccef56c..a8bba9491 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -5,7 +5,7 @@ // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index f7b7cec91..ee9e7e202 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -5,7 +5,7 @@ // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4fcacf958..ba4b288a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -5,7 +5,7 @@ // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6f6c223a2..bc93afc36 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -5,7 +5,7 @@ // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 59e522bcf..9295e6947 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -5,7 +5,7 @@ // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -348,6 +348,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index d4264a0f7..1fa081c9a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -5,7 +5,7 @@ // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -352,6 +352,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 21cbec1dd..74b321149 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -5,7 +5,7 @@ // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -352,6 +352,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 9b05bf12f..c91c8ac5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -5,7 +5,7 @@ // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -281,6 +281,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index bd82ace09..b66bf2228 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -5,7 +5,7 @@ // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go package unix @@ -356,6 +356,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1f8bded56..f7fb149b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -5,7 +5,7 @@ // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -347,6 +347,7 @@ const ( SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 + SO_BUF_LOCK = 0x51 SO_BUSY_POLL = 0x30 SO_BUSY_POLL_BUDGET = 0x49 SO_CNX_ADVICE = 0x37 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 91a23cc72..85e0cc386 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -17,6 +17,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -29,7 +30,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -255,6 +255,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + r0, er := C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.acct(C.uintptr_t(_p0)) @@ -379,16 +389,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - r0, er := C.fsync(C.int(fd)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, er := C.getpgid(C.int(pid)) pgid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 33c2609b8..f1d4a73b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -135,6 +135,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + _, e1 := callfsync_range(fd, how, start, length) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -283,16 +293,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, e1 := callfsync(fd) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, e1 := callgetpgid(pid) pgid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 8b737fa97..2caa5adf9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -18,6 +18,7 @@ import ( //go:cgo_import_dynamic libc_wait4 
wait4 "libc.a/shr_64.o" //go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fsync_range fsync_range "libc.a/shr_64.o" //go:cgo_import_dynamic libc_acct acct "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o" @@ -30,7 +31,6 @@ import ( //go:cgo_import_dynamic libc_fchmodat fchmodat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fchownat fchownat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fdatasync fdatasync "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_fsync fsync "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgid getpgid "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgrp getpgrp "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o" @@ -136,6 +136,7 @@ import ( //go:linkname libc_wait4 libc_wait4 //go:linkname libc_ioctl libc_ioctl //go:linkname libc_fcntl libc_fcntl +//go:linkname libc_fsync_range libc_fsync_range //go:linkname libc_acct libc_acct //go:linkname libc_chdir libc_chdir //go:linkname libc_chroot libc_chroot @@ -148,7 +149,6 @@ import ( //go:linkname libc_fchmodat libc_fchmodat //go:linkname libc_fchownat libc_fchownat //go:linkname libc_fdatasync libc_fdatasync -//go:linkname libc_fsync libc_fsync //go:linkname libc_getpgid libc_getpgid //go:linkname libc_getpgrp libc_getpgrp //go:linkname libc_getpid libc_getpid @@ -257,6 +257,7 @@ var ( libc_wait4, libc_ioctl, libc_fcntl, + libc_fsync_range, libc_acct, libc_chdir, libc_chroot, @@ -269,7 +270,6 @@ var ( libc_fchmodat, libc_fchownat, libc_fdatasync, - libc_fsync, libc_getpgid, libc_getpgrp, libc_getpid, @@ -430,6 +430,13 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync_range)), 4, uintptr(fd), uintptr(how), uintptr(start), uintptr(length), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_acct)), 1, _p0, 0, 0, 0, 0, 0) return @@ -514,13 +521,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 3c260917e..944a714b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -16,6 +16,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -28,7 +29,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, int, int, int); 
int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -199,6 +199,14 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.acct(C.uintptr_t(_p0))) e1 = syscall.GetErrno() @@ -295,14 +303,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.fsync(C.int(fd))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.getpgid(C.int(pid))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 4f5da1f54..93edda4c4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. //go:build linux // +build linux @@ -409,6 +409,21 @@ func mount(source string, target string, fstype string, flags uintptr, data *byt // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT_SETATTR, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 4726ab30a..51d0c0742 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fe71456db..df2efb6db 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 0b5b2f014..c8536c2c9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index bfca28648..8b981bfc2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index aa7ce85d1..31847d230 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -444,4 +444,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index b83032638..3503cbbde 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -366,4 +366,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index d75f65a0a..5ecd24bf6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -7,6 +7,7 @@ package unix const ( + SYS_SYSCALL_MASK = 0 SYS_RESTART_SYSCALL = 0 SYS_EXIT = 1 SYS_FORK = 2 @@ -407,4 +408,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 8b02f09e9..7e5c94cc7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -311,4 +311,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 026695abb..e1e2a2bf5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -428,4 +428,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 4444 SYS_LANDLOCK_ADD_RULE = 4445 SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 7320ba958..7651915a3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -358,4 +358,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 5444 SYS_LANDLOCK_ADD_RULE = 5445 SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 45082dd67..a26a2c050 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -358,4 +358,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 5444 SYS_LANDLOCK_ADD_RULE = 5445 SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 570a857a5..fda9a6a99 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -428,4 +428,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 4444 SYS_LANDLOCK_ADD_RULE = 4445 SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 638498d62..e8496150d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -435,4 +435,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 702beebfe..5ee0678a3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -407,4 +407,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index bfc87ea44..29c0f9a39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -407,4 +407,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a390e147d..5c9a9a3b6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -309,4 +309,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 
3e791e6cd..913f50f98 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -372,4 +372,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 78802a5cf..0de03a722 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -386,4 +386,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 7efe5ccba..885842c0e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -641,13 +641,13 @@ type Eproc struct { Tdev int32 Tpgid int32 Tsess uintptr - Wmesg [8]int8 + Wmesg [8]byte Xsize int32 Xrssize int16 Xccount int16 Xswrss int16 Flag int32 - Login [12]int8 + Login [12]byte Spare [4]int32 _ [4]byte } @@ -688,7 +688,7 @@ type ExternProc struct { P_priority uint8 P_usrpri uint8 P_nice int8 - P_comm [17]int8 + P_comm [17]byte P_pgrp uintptr P_addr uintptr P_xstat uint16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23a2efe8..b23c02337 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -641,13 +641,13 @@ type Eproc struct { Tdev int32 Tpgid int32 Tsess uintptr - Wmesg [8]int8 + Wmesg [8]byte Xsize int32 Xrssize int16 Xccount int16 Xswrss int16 Flag int32 - Login [12]int8 + Login [12]byte Spare [4]int32 _ [4]byte } @@ -688,7 +688,7 @@ type ExternProc struct { P_priority uint8 P_usrpri uint8 P_nice int8 - P_comm [17]int8 + P_comm [17]byte P_pgrp uintptr P_addr uintptr P_xstat uint16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 249ecfcd4..f6f0d79c4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -743,6 +743,8 @@ const ( AT_STATX_FORCE_SYNC = 0x2000 AT_STATX_DONT_SYNC = 0x4000 + AT_RECURSIVE = 0x8000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 @@ -865,6 +867,7 @@ const ( CTRL_CMD_NEWMCAST_GRP = 0x7 CTRL_CMD_DELMCAST_GRP = 0x8 CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_CMD_GETPOLICY = 0xa CTRL_ATTR_UNSPEC = 0x0 CTRL_ATTR_FAMILY_ID = 0x1 CTRL_ATTR_FAMILY_NAME = 0x2 @@ -873,12 +876,19 @@ const ( CTRL_ATTR_MAXATTR = 0x5 CTRL_ATTR_OPS = 0x6 CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_POLICY = 0x8 + CTRL_ATTR_OP_POLICY = 0x9 + CTRL_ATTR_OP = 0xa CTRL_ATTR_OP_UNSPEC = 0x0 CTRL_ATTR_OP_ID = 0x1 CTRL_ATTR_OP_FLAGS = 0x2 CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 + CTRL_ATTR_POLICY_UNSPEC = 0x0 + CTRL_ATTR_POLICY_DO = 0x1 + CTRL_ATTR_POLICY_DUMP = 0x2 + CTRL_ATTR_POLICY_DUMP_MAX = 0x2 ) const ( @@ -3264,7 +3274,8 @@ const ( LWTUNNEL_ENCAP_BPF = 0x6 LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 - LWTUNNEL_ENCAP_MAX = 0x8 + LWTUNNEL_ENCAP_IOAM6 = 0x9 + LWTUNNEL_ENCAP_MAX = 0x9 MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3617,7 +3628,9 @@ const ( ETHTOOL_A_COALESCE_TX_USECS_HIGH = 0x15 ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 0x16 ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 - ETHTOOL_A_COALESCE_MAX = 0x17 + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x19 ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3956,3 +3969,77 @@ const ( SHM_RDONLY = 0x1000 SHM_RND = 0x2000 ) + +type MountAttr struct { + Attr_set uint64 + Attr_clr uint64 + Propagation uint64 + Userns_fd uint64 +} + +const ( + WG_CMD_GET_DEVICE = 0x0 + WG_CMD_SET_DEVICE = 0x1 + WGDEVICE_F_REPLACE_PEERS = 0x1 + WGDEVICE_A_UNSPEC = 0x0 + WGDEVICE_A_IFINDEX = 0x1 + WGDEVICE_A_IFNAME = 0x2 + WGDEVICE_A_PRIVATE_KEY = 0x3 + WGDEVICE_A_PUBLIC_KEY = 0x4 + WGDEVICE_A_FLAGS = 0x5 + WGDEVICE_A_LISTEN_PORT = 0x6 + WGDEVICE_A_FWMARK = 0x7 + WGDEVICE_A_PEERS = 0x8 + WGPEER_F_REMOVE_ME = 0x1 + WGPEER_F_REPLACE_ALLOWEDIPS = 0x2 + WGPEER_F_UPDATE_ONLY = 0x4 + WGPEER_A_UNSPEC = 0x0 + WGPEER_A_PUBLIC_KEY = 0x1 + WGPEER_A_PRESHARED_KEY = 0x2 + WGPEER_A_FLAGS = 0x3 + WGPEER_A_ENDPOINT = 0x4 + WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL = 0x5 + WGPEER_A_LAST_HANDSHAKE_TIME = 0x6 + WGPEER_A_RX_BYTES = 0x7 + WGPEER_A_TX_BYTES = 0x8 + WGPEER_A_ALLOWEDIPS = 0x9 + WGPEER_A_PROTOCOL_VERSION = 0xa + WGALLOWEDIP_A_UNSPEC = 0x0 + WGALLOWEDIP_A_FAMILY = 0x1 + WGALLOWEDIP_A_IPADDR = 0x2 + WGALLOWEDIP_A_CIDR_MASK = 0x3 +) + +const ( + NL_ATTR_TYPE_INVALID = 0x0 + NL_ATTR_TYPE_FLAG = 0x1 + NL_ATTR_TYPE_U8 = 0x2 + NL_ATTR_TYPE_U16 = 0x3 + NL_ATTR_TYPE_U32 = 0x4 + NL_ATTR_TYPE_U64 = 0x5 + NL_ATTR_TYPE_S8 = 0x6 + NL_ATTR_TYPE_S16 = 0x7 + NL_ATTR_TYPE_S32 = 0x8 + NL_ATTR_TYPE_S64 = 0x9 + NL_ATTR_TYPE_BINARY = 0xa + NL_ATTR_TYPE_STRING = 0xb + NL_ATTR_TYPE_NUL_STRING = 0xc + NL_ATTR_TYPE_NESTED = 0xd + NL_ATTR_TYPE_NESTED_ARRAY = 0xe + NL_ATTR_TYPE_BITFIELD32 = 0xf + + NL_POLICY_TYPE_ATTR_UNSPEC = 0x0 + NL_POLICY_TYPE_ATTR_TYPE = 0x1 + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 0x2 + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 0x3 + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 0x4 + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 0x5 + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 0x6 + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 0x7 + NL_POLICY_TYPE_ATTR_POLICY_IDX = 0x8 + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 0x9 + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 0xa + NL_POLICY_TYPE_ATTR_PAD = 0xb + NL_POLICY_TYPE_ATTR_MASK = 0xc + 
NL_POLICY_TYPE_ATTR_MAX = 0xc +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index eeeb9aa39..bea254945 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index d30e1155c..b8c8f2894 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 69d029752..4db443016 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 28a0455bc..3ebcad8a8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 64a845483..3eb33e48a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index a1b7dee41..79a944672 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 936fa6a26..8f4b107ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 5dd546fbf..e4eb21798 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 947b32e43..d5b21f0f7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2a606151b..5188d142b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index d0d735d02..de4dd4c73 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 95e3d6d06..dccbf9b06 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cccf1ef26..635880610 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 44fcbe4e9..765edc13f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2a8b1e6f7..baf5fe650 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index b1759cf70..e21ae8ecf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index e807de206..f190651cd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -565,12 +565,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ff3aecaee..84747c582 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 9ecda6917..ac5c8b637 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 7a11e83b7..855698bb2 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -9,8 +9,6 @@ package windows import ( errorspkg "errors" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // EscapeArg rewrites command line argument s as prescribed @@ -147,8 +145,12 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListCo } return nil, err } + alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) + if err != nil { + return nil, err + } // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. - al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0]))} + al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(alloc))} err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) if err != nil { return nil, err @@ -157,36 +159,17 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListCo } // Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. -// Note that the value passed to this function will be copied into memory -// allocated by LocalAlloc, the contents of which should not contain any -// Go-managed pointers, even if the passed value itself is a Go-managed -// pointer. func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { - alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) - if err != nil { - return err - } - var src, dst []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - hdr.Data = value - hdr.Cap = int(size) - hdr.Len = int(size) - hdr = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) - hdr.Data = unsafe.Pointer(alloc) - hdr.Cap = int(size) - hdr.Len = int(size) - copy(dst, src) - al.heapAllocations = append(al.heapAllocations, alloc) - return updateProcThreadAttribute(al.data, 0, attribute, unsafe.Pointer(alloc), size, nil, nil) + al.pointers = append(al.pointers, value) + return updateProcThreadAttribute(al.data, 0, attribute, value, size, nil, nil) } // Delete frees ProcThreadAttributeList's resources. func (al *ProcThreadAttributeListContainer) Delete() { deleteProcThreadAttributeList(al.data) - for i := range al.heapAllocations { - LocalFree(Handle(al.heapAllocations[i])) - } - al.heapAllocations = nil + LocalFree(Handle(unsafe.Pointer(al.data))) + al.data = nil + al.pointers = nil } // List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. 
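Note on the exec_windows.go hunk above: after this change, ProcThreadAttributeListContainer.Update no longer copies the attribute value into LocalAlloc'd memory; it records the caller's pointer (kept reachable via al.pointers), and Delete frees the single allocation backing the list itself. The practical consequence is that a value passed to Update must stay alive until Delete is called. The following is a minimal usage sketch, not part of the patch, assuming the golang.org/x/sys/windows API as vendored here (NewProcThreadAttributeList, Update, Delete, PROC_THREAD_ATTRIBUTE_PARENT_PROCESS); the parent-process scenario and helper name are illustrative only.

//go:build windows

package main

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// buildParentAttr prepares a ProcThreadAttributeList that would reparent a new
// process under parent. With this patch, Update stores the pointer to parent
// directly (no LocalAlloc copy), so parent must remain valid until Delete is
// called on the returned container.
func buildParentAttr(parent *windows.Handle) (*windows.ProcThreadAttributeListContainer, error) {
	al, err := windows.NewProcThreadAttributeList(1) // room for one attribute
	if err != nil {
		return nil, err
	}
	if err := al.Update(
		windows.PROC_THREAD_ATTRIBUTE_PARENT_PROCESS,
		unsafe.Pointer(parent),
		unsafe.Sizeof(*parent),
	); err != nil {
		al.Delete() // frees the single allocation backing the list
		return nil, err
	}
	// al.List() is what the caller would place in
	// windows.StartupInfoEx.ProcThreadAttributeList before CreateProcess
	// with the EXTENDED_STARTUPINFO_PRESENT creation flag.
	return al, nil
}
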
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 610291098..8563f79c5 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -7,4 +7,4 @@ package windows -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go setupapi_windows.go diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index c25648343..906325e09 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows // Package registry provides access to the Windows registry. @@ -24,6 +25,7 @@ package registry import ( "io" + "runtime" "syscall" "time" ) @@ -113,6 +115,13 @@ func OpenRemoteKey(pcname string, k Key) (Key, error) { // The parameter n controls the number of returned names, // analogous to the way os.File.Readdirnames works. func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + names := make([]string, 0) // Registry key size limit is 255 bytes and described there: // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go index 50c32a3f4..ee74927d3 100644 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build generate // +build generate package registry diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go index e66643cba..417335123 100644 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index f25e7e97a..2789f6f18 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build windows // +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 5b28ae168..f8deca839 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -17,8 +17,6 @@ const ( SC_MANAGER_ALL_ACCESS = 0xf003f ) -//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW - const ( SERVICE_KERNEL_DRIVER = 1 SERVICE_FILE_SYSTEM_DRIVER = 2 @@ -133,6 +131,14 @@ const ( SC_EVENT_DATABASE_CHANGE = 0 SC_EVENT_PROPERTY_CHANGE = 1 SC_EVENT_STATUS_CHANGE = 2 + + SERVICE_START_REASON_DEMAND = 0x00000001 + SERVICE_START_REASON_AUTO = 0x00000002 + SERVICE_START_REASON_TRIGGER = 0x00000004 + SERVICE_START_REASON_RESTART_ON_FAILURE = 0x00000008 + SERVICE_START_REASON_DELAYEDAUTO = 0x00000010 + + SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) type SERVICE_STATUS struct { @@ -217,6 +223,7 @@ type QUERY_SERVICE_LOCK_STATUS struct { LockDuration uint32 } +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW @@ -237,3 +244,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications? //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? //sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW +//sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go new file mode 100644 index 000000000..14027da3f --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -0,0 +1,1425 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "encoding/binary" + "errors" + "fmt" + "runtime" + "strings" + "syscall" + "unsafe" +) + +// This file contains functions that wrap SetupAPI.dll and CfgMgr32.dll, +// core system functions for managing hardware devices, drivers, and the PnP tree. 
+// Information about these APIs can be found at: +// https://docs.microsoft.com/en-us/windows-hardware/drivers/install/setupapi +// https://docs.microsoft.com/en-us/windows/win32/devinst/cfgmgr32- + +const ( + ERROR_EXPECTED_SECTION_NAME Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON Errno = 
0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) + +const ( + MAX_DEVICE_ID_LEN = 200 + MAX_DEVNODE_ID_LEN = MAX_DEVICE_ID_LEN + MAX_GUID_STRING_LEN = 39 // 38 chars + terminator null + MAX_CLASS_NAME_LEN = 32 + MAX_PROFILE_LEN = 80 + MAX_CONFIG_VALUE = 9999 + MAX_INSTANCE_VALUE = 9999 + CONFIGMG_VERSION = 0x0400 +) + +// Maximum string length constants +const ( + LINE_LEN = 256 // Windows 9x-compatible maximum for displayable strings coming from a device INF. + MAX_INF_STRING_LENGTH = 4096 // Actual maximum size of an INF string (including string substitutions). + MAX_INF_SECTION_NAME_LENGTH = 255 // For Windows 9x compatibility, INF section names should be constrained to 32 characters. 
+ MAX_TITLE_LEN = 60 + MAX_INSTRUCTION_LEN = 256 + MAX_LABEL_LEN = 30 + MAX_SERVICE_NAME_LEN = 256 + MAX_SUBTITLE_LEN = 256 +) + +const ( + // SP_MAX_MACHINENAME_LENGTH defines maximum length of a machine name in the format expected by ConfigMgr32 CM_Connect_Machine (i.e., "\\\\MachineName\0"). + SP_MAX_MACHINENAME_LENGTH = MAX_PATH + 3 +) + +// HSPFILEQ is type for setup file queue +type HSPFILEQ uintptr + +// DevInfo holds reference to device information set +type DevInfo Handle + +// DEVINST is a handle usually recognized by cfgmgr32 APIs +type DEVINST uint32 + +// DevInfoData is a device information structure (references a device instance that is a member of a device information set) +type DevInfoData struct { + size uint32 + ClassGUID GUID + DevInst DEVINST + _ uintptr +} + +// DevInfoListDetailData is a structure for detailed information on a device information set (used for SetupDiGetDeviceInfoListDetail which supersedes the functionality of SetupDiGetDeviceInfoListClass). +type DevInfoListDetailData struct { + size uint32 // Use unsafeSizeOf method + ClassGUID GUID + RemoteMachineHandle Handle + remoteMachineName [SP_MAX_MACHINENAME_LENGTH]uint16 +} + +func (*DevInfoListDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DevInfoListDetailData{}.remoteMachineName) + unsafe.Sizeof(DevInfoListDetailData{}.remoteMachineName)) + } + return uint32(unsafe.Sizeof(DevInfoListDetailData{})) +} + +func (data *DevInfoListDetailData) RemoteMachineName() string { + return UTF16ToString(data.remoteMachineName[:]) +} + +func (data *DevInfoListDetailData) SetRemoteMachineName(remoteMachineName string) error { + str, err := UTF16FromString(remoteMachineName) + if err != nil { + return err + } + copy(data.remoteMachineName[:], str) + return nil +} + +// DI_FUNCTION is function type for device installer +type DI_FUNCTION uint32 + +const ( + DIF_SELECTDEVICE DI_FUNCTION = 0x00000001 + DIF_INSTALLDEVICE DI_FUNCTION = 0x00000002 + DIF_ASSIGNRESOURCES DI_FUNCTION = 0x00000003 + DIF_PROPERTIES DI_FUNCTION = 0x00000004 + DIF_REMOVE DI_FUNCTION = 0x00000005 + DIF_FIRSTTIMESETUP DI_FUNCTION = 0x00000006 + DIF_FOUNDDEVICE DI_FUNCTION = 0x00000007 + DIF_SELECTCLASSDRIVERS DI_FUNCTION = 0x00000008 + DIF_VALIDATECLASSDRIVERS DI_FUNCTION = 0x00000009 + DIF_INSTALLCLASSDRIVERS DI_FUNCTION = 0x0000000A + DIF_CALCDISKSPACE DI_FUNCTION = 0x0000000B + DIF_DESTROYPRIVATEDATA DI_FUNCTION = 0x0000000C + DIF_VALIDATEDRIVER DI_FUNCTION = 0x0000000D + DIF_DETECT DI_FUNCTION = 0x0000000F + DIF_INSTALLWIZARD DI_FUNCTION = 0x00000010 + DIF_DESTROYWIZARDDATA DI_FUNCTION = 0x00000011 + DIF_PROPERTYCHANGE DI_FUNCTION = 0x00000012 + DIF_ENABLECLASS DI_FUNCTION = 0x00000013 + DIF_DETECTVERIFY DI_FUNCTION = 0x00000014 + DIF_INSTALLDEVICEFILES DI_FUNCTION = 0x00000015 + DIF_UNREMOVE DI_FUNCTION = 0x00000016 + DIF_SELECTBESTCOMPATDRV DI_FUNCTION = 0x00000017 + DIF_ALLOW_INSTALL DI_FUNCTION = 0x00000018 + DIF_REGISTERDEVICE DI_FUNCTION = 0x00000019 + DIF_NEWDEVICEWIZARD_PRESELECT DI_FUNCTION = 0x0000001A + DIF_NEWDEVICEWIZARD_SELECT DI_FUNCTION = 0x0000001B + DIF_NEWDEVICEWIZARD_PREANALYZE DI_FUNCTION = 0x0000001C + DIF_NEWDEVICEWIZARD_POSTANALYZE DI_FUNCTION = 0x0000001D + DIF_NEWDEVICEWIZARD_FINISHINSTALL DI_FUNCTION = 0x0000001E + DIF_INSTALLINTERFACES DI_FUNCTION = 0x00000020 + DIF_DETECTCANCEL DI_FUNCTION = 0x00000021 + DIF_REGISTER_COINSTALLERS DI_FUNCTION = 0x00000022 + DIF_ADDPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000023 + 
DIF_ADDPROPERTYPAGE_BASIC DI_FUNCTION = 0x00000024 + DIF_TROUBLESHOOTER DI_FUNCTION = 0x00000026 + DIF_POWERMESSAGEWAKE DI_FUNCTION = 0x00000027 + DIF_ADDREMOTEPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000028 + DIF_UPDATEDRIVER_UI DI_FUNCTION = 0x00000029 + DIF_FINISHINSTALL_ACTION DI_FUNCTION = 0x0000002A +) + +// DevInstallParams is device installation parameters structure (associated with a particular device information element, or globally with a device information set) +type DevInstallParams struct { + size uint32 + Flags DI_FLAGS + FlagsEx DI_FLAGSEX + hwndParent uintptr + InstallMsgHandler uintptr + InstallMsgHandlerContext uintptr + FileQueue HSPFILEQ + _ uintptr + _ uint32 + driverPath [MAX_PATH]uint16 +} + +func (params *DevInstallParams) DriverPath() string { + return UTF16ToString(params.driverPath[:]) +} + +func (params *DevInstallParams) SetDriverPath(driverPath string) error { + str, err := UTF16FromString(driverPath) + if err != nil { + return err + } + copy(params.driverPath[:], str) + return nil +} + +// DI_FLAGS is SP_DEVINSTALL_PARAMS.Flags values +type DI_FLAGS uint32 + +const ( + // Flags for choosing a device + DI_SHOWOEM DI_FLAGS = 0x00000001 // support Other... button + DI_SHOWCOMPAT DI_FLAGS = 0x00000002 // show compatibility list + DI_SHOWCLASS DI_FLAGS = 0x00000004 // show class list + DI_SHOWALL DI_FLAGS = 0x00000007 // both class & compat list shown + DI_NOVCP DI_FLAGS = 0x00000008 // don't create a new copy queue--use caller-supplied FileQueue + DI_DIDCOMPAT DI_FLAGS = 0x00000010 // Searched for compatible devices + DI_DIDCLASS DI_FLAGS = 0x00000020 // Searched for class devices + DI_AUTOASSIGNRES DI_FLAGS = 0x00000040 // No UI for resources if possible + + // Flags returned by DiInstallDevice to indicate need to reboot/restart + DI_NEEDRESTART DI_FLAGS = 0x00000080 // Reboot required to take effect + DI_NEEDREBOOT DI_FLAGS = 0x00000100 // "" + + // Flags for device installation + DI_NOBROWSE DI_FLAGS = 0x00000200 // no Browse... in InsertDisk + + // Flags set by DiBuildDriverInfoList + DI_MULTMFGS DI_FLAGS = 0x00000400 // Set if multiple manufacturers in class driver list + + // Flag indicates that device is disabled + DI_DISABLED DI_FLAGS = 0x00000800 // Set if device disabled + + // Flags for Device/Class Properties + DI_GENERALPAGE_ADDED DI_FLAGS = 0x00001000 + DI_RESOURCEPAGE_ADDED DI_FLAGS = 0x00002000 + + // Flag to indicate the setting properties for this Device (or class) caused a change so the Dev Mgr UI probably needs to be updated. + DI_PROPERTIES_CHANGE DI_FLAGS = 0x00004000 + + // Flag to indicate that the sorting from the INF file should be used. + DI_INF_IS_SORTED DI_FLAGS = 0x00008000 + + // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 + + // Flag that prevents ConfigMgr from removing/re-enumerating devices during device + // registration, installation, and deletion. + DI_DONOTCALLCONFIGMG DI_FLAGS = 0x00020000 + + // The following flag can be used to install a device disabled + DI_INSTALLDISABLED DI_FLAGS = 0x00040000 + + // Flag that causes SetupDiBuildDriverInfoList to build a device's compatible driver + // list from its existing class driver list, instead of the normal INF search. + DI_COMPAT_FROM_CLASS DI_FLAGS = 0x00080000 + + // This flag is set if the Class Install params should be used. 
+ DI_CLASSINSTALLPARAMS DI_FLAGS = 0x00100000 + + // This flag is set if the caller of DiCallClassInstaller does NOT want the internal default action performed if the Class installer returns ERROR_DI_DO_DEFAULT. + DI_NODI_DEFAULTACTION DI_FLAGS = 0x00200000 + + // Flags for device installation + DI_QUIETINSTALL DI_FLAGS = 0x00800000 // don't confuse the user with questions or excess info + DI_NOFILECOPY DI_FLAGS = 0x01000000 // No file Copy necessary + DI_FORCECOPY DI_FLAGS = 0x02000000 // Force files to be copied from install path + DI_DRIVERPAGE_ADDED DI_FLAGS = 0x04000000 // Prop provider added Driver page. + DI_USECI_SELECTSTRINGS DI_FLAGS = 0x08000000 // Use Class Installer Provided strings in the Select Device Dlg + DI_OVERRIDE_INFFLAGS DI_FLAGS = 0x10000000 // Override INF flags + DI_PROPS_NOCHANGEUSAGE DI_FLAGS = 0x20000000 // No Enable/Disable in General Props + + DI_NOSELECTICONS DI_FLAGS = 0x40000000 // No small icons in select device dialogs + + DI_NOWRITE_IDS DI_FLAGS = 0x80000000 // Don't write HW & Compat IDs on install +) + +// DI_FLAGSEX is SP_DEVINSTALL_PARAMS.FlagsEx values +type DI_FLAGSEX uint32 + +const ( + DI_FLAGSEX_CI_FAILED DI_FLAGSEX = 0x00000004 // Failed to Load/Call class installer + DI_FLAGSEX_FINISHINSTALL_ACTION DI_FLAGSEX = 0x00000008 // Class/co-installer wants to get a DIF_FINISH_INSTALL action in client context. + DI_FLAGSEX_DIDINFOLIST DI_FLAGSEX = 0x00000010 // Did the Class Info List + DI_FLAGSEX_DIDCOMPATINFO DI_FLAGSEX = 0x00000020 // Did the Compat Info List + DI_FLAGSEX_FILTERCLASSES DI_FLAGSEX = 0x00000040 + DI_FLAGSEX_SETFAILEDINSTALL DI_FLAGSEX = 0x00000080 + DI_FLAGSEX_DEVICECHANGE DI_FLAGSEX = 0x00000100 + DI_FLAGSEX_ALWAYSWRITEIDS DI_FLAGSEX = 0x00000200 + DI_FLAGSEX_PROPCHANGE_PENDING DI_FLAGSEX = 0x00000400 // One or more device property sheets have had changes made to them, and need to have a DIF_PROPERTYCHANGE occur. + DI_FLAGSEX_ALLOWEXCLUDEDDRVS DI_FLAGSEX = 0x00000800 + DI_FLAGSEX_NOUIONQUERYREMOVE DI_FLAGSEX = 0x00001000 + DI_FLAGSEX_USECLASSFORCOMPAT DI_FLAGSEX = 0x00002000 // Use the device's class when building compat drv list. (Ignored if DI_COMPAT_FROM_CLASS flag is specified.) + DI_FLAGSEX_NO_DRVREG_MODIFY DI_FLAGSEX = 0x00008000 // Don't run AddReg and DelReg for device's software (driver) key. + DI_FLAGSEX_IN_SYSTEM_SETUP DI_FLAGSEX = 0x00010000 // Installation is occurring during initial system setup. + DI_FLAGSEX_INET_DRIVER DI_FLAGSEX = 0x00020000 // Driver came from Windows Update + DI_FLAGSEX_APPENDDRIVERLIST DI_FLAGSEX = 0x00040000 // Cause SetupDiBuildDriverInfoList to append a new driver list to an existing list. + DI_FLAGSEX_PREINSTALLBACKUP DI_FLAGSEX = 0x00080000 // not used + DI_FLAGSEX_BACKUPONREPLACE DI_FLAGSEX = 0x00100000 // not used + DI_FLAGSEX_DRIVERLIST_FROM_URL DI_FLAGSEX = 0x00200000 // build driver list from INF(s) retrieved from URL specified in SP_DEVINSTALL_PARAMS.DriverPath (empty string means Windows Update website) + DI_FLAGSEX_EXCLUDE_OLD_INET_DRIVERS DI_FLAGSEX = 0x00800000 // Don't include old Internet drivers when building a driver list. Ignored on Windows Vista and later. + DI_FLAGSEX_POWERPAGE_ADDED DI_FLAGSEX = 0x01000000 // class installer added their own power page + DI_FLAGSEX_FILTERSIMILARDRIVERS DI_FLAGSEX = 0x02000000 // only include similar drivers in class list + DI_FLAGSEX_INSTALLEDDRIVER DI_FLAGSEX = 0x04000000 // only add the installed driver to the class or compat driver list. 
Used in calls to SetupDiBuildDriverInfoList + DI_FLAGSEX_NO_CLASSLIST_NODE_MERGE DI_FLAGSEX = 0x08000000 // Don't remove identical driver nodes from the class list + DI_FLAGSEX_ALTPLATFORM_DRVSEARCH DI_FLAGSEX = 0x10000000 // Build driver list based on alternate platform information specified in associated file queue + DI_FLAGSEX_RESTART_DEVICE_ONLY DI_FLAGSEX = 0x20000000 // only restart the device drivers are being installed on as opposed to restarting all devices using those drivers. + DI_FLAGSEX_RECURSIVESEARCH DI_FLAGSEX = 0x40000000 // Tell SetupDiBuildDriverInfoList to do a recursive search + DI_FLAGSEX_SEARCH_PUBLISHED_INFS DI_FLAGSEX = 0x80000000 // Tell SetupDiBuildDriverInfoList to do a "published INF" search +) + +// ClassInstallHeader is the first member of any class install parameters structure. It contains the device installation request code that defines the format of the rest of the install parameters structure. +type ClassInstallHeader struct { + size uint32 + InstallFunction DI_FUNCTION +} + +func MakeClassInstallHeader(installFunction DI_FUNCTION) *ClassInstallHeader { + hdr := &ClassInstallHeader{InstallFunction: installFunction} + hdr.size = uint32(unsafe.Sizeof(*hdr)) + return hdr +} + +// DICS_STATE specifies values indicating a change in a device's state +type DICS_STATE uint32 + +const ( + DICS_ENABLE DICS_STATE = 0x00000001 // The device is being enabled. + DICS_DISABLE DICS_STATE = 0x00000002 // The device is being disabled. + DICS_PROPCHANGE DICS_STATE = 0x00000003 // The properties of the device have changed. + DICS_START DICS_STATE = 0x00000004 // The device is being started (if the request is for the currently active hardware profile). + DICS_STOP DICS_STATE = 0x00000005 // The device is being stopped. The driver stack will be unloaded and the CSCONFIGFLAG_DO_NOT_START flag will be set for the device. +) + +// DICS_FLAG specifies the scope of a device property change +type DICS_FLAG uint32 + +const ( + DICS_FLAG_GLOBAL DICS_FLAG = 0x00000001 // make change in all hardware profiles + DICS_FLAG_CONFIGSPECIFIC DICS_FLAG = 0x00000002 // make change in specified profile only + DICS_FLAG_CONFIGGENERAL DICS_FLAG = 0x00000004 // 1 or more hardware profile-specific changes to follow (obsolete) +) + +// PropChangeParams is a structure corresponding to a DIF_PROPERTYCHANGE install function. +type PropChangeParams struct { + ClassInstallHeader ClassInstallHeader + StateChange DICS_STATE + Scope DICS_FLAG + HwProfile uint32 +} + +// DI_REMOVEDEVICE specifies the scope of the device removal +type DI_REMOVEDEVICE uint32 + +const ( + DI_REMOVEDEVICE_GLOBAL DI_REMOVEDEVICE = 0x00000001 // Make this change in all hardware profiles. Remove information about the device from the registry. + DI_REMOVEDEVICE_CONFIGSPECIFIC DI_REMOVEDEVICE = 0x00000002 // Make this change to only the hardware profile specified by HwProfile. this flag only applies to root-enumerated devices. When Windows removes the device from the last hardware profile in which it was configured, Windows performs a global removal. +) + +// RemoveDeviceParams is a structure corresponding to a DIF_REMOVE install function. 
+type RemoveDeviceParams struct { + ClassInstallHeader ClassInstallHeader + Scope DI_REMOVEDEVICE + HwProfile uint32 +} + +// DrvInfoData is driver information structure (member of a driver info list that may be associated with a particular device instance, or (globally) with a device information set) +type DrvInfoData struct { + size uint32 + DriverType uint32 + _ uintptr + description [LINE_LEN]uint16 + mfgName [LINE_LEN]uint16 + providerName [LINE_LEN]uint16 + DriverDate Filetime + DriverVersion uint64 +} + +func (data *DrvInfoData) Description() string { + return UTF16ToString(data.description[:]) +} + +func (data *DrvInfoData) SetDescription(description string) error { + str, err := UTF16FromString(description) + if err != nil { + return err + } + copy(data.description[:], str) + return nil +} + +func (data *DrvInfoData) MfgName() string { + return UTF16ToString(data.mfgName[:]) +} + +func (data *DrvInfoData) SetMfgName(mfgName string) error { + str, err := UTF16FromString(mfgName) + if err != nil { + return err + } + copy(data.mfgName[:], str) + return nil +} + +func (data *DrvInfoData) ProviderName() string { + return UTF16ToString(data.providerName[:]) +} + +func (data *DrvInfoData) SetProviderName(providerName string) error { + str, err := UTF16FromString(providerName) + if err != nil { + return err + } + copy(data.providerName[:], str) + return nil +} + +// IsNewer method returns true if DrvInfoData date and version is newer than supplied parameters. +func (data *DrvInfoData) IsNewer(driverDate Filetime, driverVersion uint64) bool { + if data.DriverDate.HighDateTime > driverDate.HighDateTime { + return true + } + if data.DriverDate.HighDateTime < driverDate.HighDateTime { + return false + } + + if data.DriverDate.LowDateTime > driverDate.LowDateTime { + return true + } + if data.DriverDate.LowDateTime < driverDate.LowDateTime { + return false + } + + if data.DriverVersion > driverVersion { + return true + } + if data.DriverVersion < driverVersion { + return false + } + + return false +} + +// DrvInfoDetailData is driver information details structure (provides detailed information about a particular driver information structure) +type DrvInfoDetailData struct { + size uint32 // Use unsafeSizeOf method + InfDate Filetime + compatIDsOffset uint32 + compatIDsLength uint32 + _ uintptr + sectionName [LINE_LEN]uint16 + infFileName [MAX_PATH]uint16 + drvDescription [LINE_LEN]uint16 + hardwareID [1]uint16 +} + +func (*DrvInfoDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DrvInfoDetailData{}.hardwareID) + unsafe.Sizeof(DrvInfoDetailData{}.hardwareID)) + } + return uint32(unsafe.Sizeof(DrvInfoDetailData{})) +} + +func (data *DrvInfoDetailData) SectionName() string { + return UTF16ToString(data.sectionName[:]) +} + +func (data *DrvInfoDetailData) InfFileName() string { + return UTF16ToString(data.infFileName[:]) +} + +func (data *DrvInfoDetailData) DrvDescription() string { + return UTF16ToString(data.drvDescription[:]) +} + +func (data *DrvInfoDetailData) HardwareID() string { + if data.compatIDsOffset > 1 { + bufW := data.getBuf() + return UTF16ToString(bufW[:wcslen(bufW)]) + } + + return "" +} + +func (data *DrvInfoDetailData) CompatIDs() []string { + a := make([]string, 0) + + if data.compatIDsLength > 0 { + bufW := data.getBuf() + bufW = bufW[data.compatIDsOffset : data.compatIDsOffset+data.compatIDsLength] + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = 
append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + } + + return a +} + +func (data *DrvInfoDetailData) getBuf() []uint16 { + len := (data.size - uint32(unsafe.Offsetof(data.hardwareID))) / 2 + sl := struct { + addr *uint16 + len int + cap int + }{&data.hardwareID[0], int(len), int(len)} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// IsCompatible method tests if given hardware ID matches the driver or is listed on the compatible ID list. +func (data *DrvInfoDetailData) IsCompatible(hwid string) bool { + hwidLC := strings.ToLower(hwid) + if strings.ToLower(data.HardwareID()) == hwidLC { + return true + } + a := data.CompatIDs() + for i := range a { + if strings.ToLower(a[i]) == hwidLC { + return true + } + } + + return false +} + +// DICD flags control SetupDiCreateDeviceInfo +type DICD uint32 + +const ( + DICD_GENERATE_ID DICD = 0x00000001 + DICD_INHERIT_CLASSDRVS DICD = 0x00000002 +) + +// SUOI flags control SetupUninstallOEMInf +type SUOI uint32 + +const ( + SUOI_FORCEDELETE SUOI = 0x0001 +) + +// SPDIT flags to distinguish between class drivers and +// device drivers. (Passed in 'DriverType' parameter of +// driver information list APIs) +type SPDIT uint32 + +const ( + SPDIT_NODRIVER SPDIT = 0x00000000 + SPDIT_CLASSDRIVER SPDIT = 0x00000001 + SPDIT_COMPATDRIVER SPDIT = 0x00000002 +) + +// DIGCF flags control what is included in the device information set built by SetupDiGetClassDevs +type DIGCF uint32 + +const ( + DIGCF_DEFAULT DIGCF = 0x00000001 // only valid with DIGCF_DEVICEINTERFACE + DIGCF_PRESENT DIGCF = 0x00000002 + DIGCF_ALLCLASSES DIGCF = 0x00000004 + DIGCF_PROFILE DIGCF = 0x00000008 + DIGCF_DEVICEINTERFACE DIGCF = 0x00000010 +) + +// DIREG specifies values for SetupDiCreateDevRegKey, SetupDiOpenDevRegKey, and SetupDiDeleteDevRegKey. +type DIREG uint32 + +const ( + DIREG_DEV DIREG = 0x00000001 // Open/Create/Delete device key + DIREG_DRV DIREG = 0x00000002 // Open/Create/Delete driver key + DIREG_BOTH DIREG = 0x00000004 // Delete both driver and Device key +) + +// SPDRP specifies device registry property codes +// (Codes marked as read-only (R) may only be used for +// SetupDiGetDeviceRegistryProperty) +// +// These values should cover the same set of registry properties +// as defined by the CM_DRP codes in cfgmgr32.h. +// +// Note that SPDRP codes are zero based while CM_DRP codes are one based! 
+type SPDRP uint32 + +const ( + SPDRP_DEVICEDESC SPDRP = 0x00000000 // DeviceDesc (R/W) + SPDRP_HARDWAREID SPDRP = 0x00000001 // HardwareID (R/W) + SPDRP_COMPATIBLEIDS SPDRP = 0x00000002 // CompatibleIDs (R/W) + SPDRP_SERVICE SPDRP = 0x00000004 // Service (R/W) + SPDRP_CLASS SPDRP = 0x00000007 // Class (R--tied to ClassGUID) + SPDRP_CLASSGUID SPDRP = 0x00000008 // ClassGUID (R/W) + SPDRP_DRIVER SPDRP = 0x00000009 // Driver (R/W) + SPDRP_CONFIGFLAGS SPDRP = 0x0000000A // ConfigFlags (R/W) + SPDRP_MFG SPDRP = 0x0000000B // Mfg (R/W) + SPDRP_FRIENDLYNAME SPDRP = 0x0000000C // FriendlyName (R/W) + SPDRP_LOCATION_INFORMATION SPDRP = 0x0000000D // LocationInformation (R/W) + SPDRP_PHYSICAL_DEVICE_OBJECT_NAME SPDRP = 0x0000000E // PhysicalDeviceObjectName (R) + SPDRP_CAPABILITIES SPDRP = 0x0000000F // Capabilities (R) + SPDRP_UI_NUMBER SPDRP = 0x00000010 // UiNumber (R) + SPDRP_UPPERFILTERS SPDRP = 0x00000011 // UpperFilters (R/W) + SPDRP_LOWERFILTERS SPDRP = 0x00000012 // LowerFilters (R/W) + SPDRP_BUSTYPEGUID SPDRP = 0x00000013 // BusTypeGUID (R) + SPDRP_LEGACYBUSTYPE SPDRP = 0x00000014 // LegacyBusType (R) + SPDRP_BUSNUMBER SPDRP = 0x00000015 // BusNumber (R) + SPDRP_ENUMERATOR_NAME SPDRP = 0x00000016 // Enumerator Name (R) + SPDRP_SECURITY SPDRP = 0x00000017 // Security (R/W, binary form) + SPDRP_SECURITY_SDS SPDRP = 0x00000018 // Security (W, SDS form) + SPDRP_DEVTYPE SPDRP = 0x00000019 // Device Type (R/W) + SPDRP_EXCLUSIVE SPDRP = 0x0000001A // Device is exclusive-access (R/W) + SPDRP_CHARACTERISTICS SPDRP = 0x0000001B // Device Characteristics (R/W) + SPDRP_ADDRESS SPDRP = 0x0000001C // Device Address (R) + SPDRP_UI_NUMBER_DESC_FORMAT SPDRP = 0x0000001D // UiNumberDescFormat (R/W) + SPDRP_DEVICE_POWER_DATA SPDRP = 0x0000001E // Device Power Data (R) + SPDRP_REMOVAL_POLICY SPDRP = 0x0000001F // Removal Policy (R) + SPDRP_REMOVAL_POLICY_HW_DEFAULT SPDRP = 0x00000020 // Hardware Removal Policy (R) + SPDRP_REMOVAL_POLICY_OVERRIDE SPDRP = 0x00000021 // Removal Policy Override (RW) + SPDRP_INSTALL_STATE SPDRP = 0x00000022 // Device Install State (R) + SPDRP_LOCATION_PATHS SPDRP = 0x00000023 // Device Location Paths (R) + SPDRP_BASE_CONTAINERID SPDRP = 0x00000024 // Base ContainerID (R) + + SPDRP_MAXIMUM_PROPERTY SPDRP = 0x00000025 // Upper bound on ordinals +) + +// DEVPROPTYPE represents the property-data-type identifier that specifies the +// data type of a device property value in the unified device property model. 
+type DEVPROPTYPE uint32 + +const ( + DEVPROP_TYPEMOD_ARRAY DEVPROPTYPE = 0x00001000 + DEVPROP_TYPEMOD_LIST DEVPROPTYPE = 0x00002000 + + DEVPROP_TYPE_EMPTY DEVPROPTYPE = 0x00000000 + DEVPROP_TYPE_NULL DEVPROPTYPE = 0x00000001 + DEVPROP_TYPE_SBYTE DEVPROPTYPE = 0x00000002 + DEVPROP_TYPE_BYTE DEVPROPTYPE = 0x00000003 + DEVPROP_TYPE_INT16 DEVPROPTYPE = 0x00000004 + DEVPROP_TYPE_UINT16 DEVPROPTYPE = 0x00000005 + DEVPROP_TYPE_INT32 DEVPROPTYPE = 0x00000006 + DEVPROP_TYPE_UINT32 DEVPROPTYPE = 0x00000007 + DEVPROP_TYPE_INT64 DEVPROPTYPE = 0x00000008 + DEVPROP_TYPE_UINT64 DEVPROPTYPE = 0x00000009 + DEVPROP_TYPE_FLOAT DEVPROPTYPE = 0x0000000A + DEVPROP_TYPE_DOUBLE DEVPROPTYPE = 0x0000000B + DEVPROP_TYPE_DECIMAL DEVPROPTYPE = 0x0000000C + DEVPROP_TYPE_GUID DEVPROPTYPE = 0x0000000D + DEVPROP_TYPE_CURRENCY DEVPROPTYPE = 0x0000000E + DEVPROP_TYPE_DATE DEVPROPTYPE = 0x0000000F + DEVPROP_TYPE_FILETIME DEVPROPTYPE = 0x00000010 + DEVPROP_TYPE_BOOLEAN DEVPROPTYPE = 0x00000011 + DEVPROP_TYPE_STRING DEVPROPTYPE = 0x00000012 + DEVPROP_TYPE_STRING_LIST DEVPROPTYPE = DEVPROP_TYPE_STRING | DEVPROP_TYPEMOD_LIST + DEVPROP_TYPE_SECURITY_DESCRIPTOR DEVPROPTYPE = 0x00000013 + DEVPROP_TYPE_SECURITY_DESCRIPTOR_STRING DEVPROPTYPE = 0x00000014 + DEVPROP_TYPE_DEVPROPKEY DEVPROPTYPE = 0x00000015 + DEVPROP_TYPE_DEVPROPTYPE DEVPROPTYPE = 0x00000016 + DEVPROP_TYPE_BINARY DEVPROPTYPE = DEVPROP_TYPE_BYTE | DEVPROP_TYPEMOD_ARRAY + DEVPROP_TYPE_ERROR DEVPROPTYPE = 0x00000017 + DEVPROP_TYPE_NTSTATUS DEVPROPTYPE = 0x00000018 + DEVPROP_TYPE_STRING_INDIRECT DEVPROPTYPE = 0x00000019 + + MAX_DEVPROP_TYPE DEVPROPTYPE = 0x00000019 + MAX_DEVPROP_TYPEMOD DEVPROPTYPE = 0x00002000 + + DEVPROP_MASK_TYPE DEVPROPTYPE = 0x00000FFF + DEVPROP_MASK_TYPEMOD DEVPROPTYPE = 0x0000F000 +) + +// DEVPROPGUID specifies a property category. +type DEVPROPGUID GUID + +// DEVPROPID uniquely identifies the property within the property category. +type DEVPROPID uint32 + +const DEVPROPID_FIRST_USABLE DEVPROPID = 2 + +// DEVPROPKEY represents a device property key for a device property in the +// unified device property model. 
+type DEVPROPKEY struct { + FmtID DEVPROPGUID + PID DEVPROPID +} + +// CONFIGRET is a return value or error code from cfgmgr32 APIs +type CONFIGRET uint32 + +func (ret CONFIGRET) Error() string { + if win32Error, ok := ret.Unwrap().(Errno); ok { + return fmt.Sprintf("%s (CfgMgr error: 0x%08x)", win32Error.Error(), uint32(ret)) + } + return fmt.Sprintf("CfgMgr error: 0x%08x", uint32(ret)) +} + +func (ret CONFIGRET) Win32Error(defaultError Errno) Errno { + return cm_MapCrToWin32Err(ret, defaultError) +} + +func (ret CONFIGRET) Unwrap() error { + const noMatch = Errno(^uintptr(0)) + win32Error := ret.Win32Error(noMatch) + if win32Error == noMatch { + return nil + } + return win32Error +} + +const ( + CR_SUCCESS CONFIGRET = 0x00000000 + CR_DEFAULT CONFIGRET = 0x00000001 + CR_OUT_OF_MEMORY CONFIGRET = 0x00000002 + CR_INVALID_POINTER CONFIGRET = 0x00000003 + CR_INVALID_FLAG CONFIGRET = 0x00000004 + CR_INVALID_DEVNODE CONFIGRET = 0x00000005 + CR_INVALID_DEVINST = CR_INVALID_DEVNODE + CR_INVALID_RES_DES CONFIGRET = 0x00000006 + CR_INVALID_LOG_CONF CONFIGRET = 0x00000007 + CR_INVALID_ARBITRATOR CONFIGRET = 0x00000008 + CR_INVALID_NODELIST CONFIGRET = 0x00000009 + CR_DEVNODE_HAS_REQS CONFIGRET = 0x0000000A + CR_DEVINST_HAS_REQS = CR_DEVNODE_HAS_REQS + CR_INVALID_RESOURCEID CONFIGRET = 0x0000000B + CR_DLVXD_NOT_FOUND CONFIGRET = 0x0000000C + CR_NO_SUCH_DEVNODE CONFIGRET = 0x0000000D + CR_NO_SUCH_DEVINST = CR_NO_SUCH_DEVNODE + CR_NO_MORE_LOG_CONF CONFIGRET = 0x0000000E + CR_NO_MORE_RES_DES CONFIGRET = 0x0000000F + CR_ALREADY_SUCH_DEVNODE CONFIGRET = 0x00000010 + CR_ALREADY_SUCH_DEVINST = CR_ALREADY_SUCH_DEVNODE + CR_INVALID_RANGE_LIST CONFIGRET = 0x00000011 + CR_INVALID_RANGE CONFIGRET = 0x00000012 + CR_FAILURE CONFIGRET = 0x00000013 + CR_NO_SUCH_LOGICAL_DEV CONFIGRET = 0x00000014 + CR_CREATE_BLOCKED CONFIGRET = 0x00000015 + CR_NOT_SYSTEM_VM CONFIGRET = 0x00000016 + CR_REMOVE_VETOED CONFIGRET = 0x00000017 + CR_APM_VETOED CONFIGRET = 0x00000018 + CR_INVALID_LOAD_TYPE CONFIGRET = 0x00000019 + CR_BUFFER_SMALL CONFIGRET = 0x0000001A + CR_NO_ARBITRATOR CONFIGRET = 0x0000001B + CR_NO_REGISTRY_HANDLE CONFIGRET = 0x0000001C + CR_REGISTRY_ERROR CONFIGRET = 0x0000001D + CR_INVALID_DEVICE_ID CONFIGRET = 0x0000001E + CR_INVALID_DATA CONFIGRET = 0x0000001F + CR_INVALID_API CONFIGRET = 0x00000020 + CR_DEVLOADER_NOT_READY CONFIGRET = 0x00000021 + CR_NEED_RESTART CONFIGRET = 0x00000022 + CR_NO_MORE_HW_PROFILES CONFIGRET = 0x00000023 + CR_DEVICE_NOT_THERE CONFIGRET = 0x00000024 + CR_NO_SUCH_VALUE CONFIGRET = 0x00000025 + CR_WRONG_TYPE CONFIGRET = 0x00000026 + CR_INVALID_PRIORITY CONFIGRET = 0x00000027 + CR_NOT_DISABLEABLE CONFIGRET = 0x00000028 + CR_FREE_RESOURCES CONFIGRET = 0x00000029 + CR_QUERY_VETOED CONFIGRET = 0x0000002A + CR_CANT_SHARE_IRQ CONFIGRET = 0x0000002B + CR_NO_DEPENDENT CONFIGRET = 0x0000002C + CR_SAME_RESOURCES CONFIGRET = 0x0000002D + CR_NO_SUCH_REGISTRY_KEY CONFIGRET = 0x0000002E + CR_INVALID_MACHINENAME CONFIGRET = 0x0000002F + CR_REMOTE_COMM_FAILURE CONFIGRET = 0x00000030 + CR_MACHINE_UNAVAILABLE CONFIGRET = 0x00000031 + CR_NO_CM_SERVICES CONFIGRET = 0x00000032 + CR_ACCESS_DENIED CONFIGRET = 0x00000033 + CR_CALL_NOT_IMPLEMENTED CONFIGRET = 0x00000034 + CR_INVALID_PROPERTY CONFIGRET = 0x00000035 + CR_DEVICE_INTERFACE_ACTIVE CONFIGRET = 0x00000036 + CR_NO_SUCH_DEVICE_INTERFACE CONFIGRET = 0x00000037 + CR_INVALID_REFERENCE_STRING CONFIGRET = 0x00000038 + CR_INVALID_CONFLICT_LIST CONFIGRET = 0x00000039 + CR_INVALID_INDEX CONFIGRET = 0x0000003A + CR_INVALID_STRUCTURE_SIZE CONFIGRET = 0x0000003B + 
NUM_CR_RESULTS CONFIGRET = 0x0000003C +) + +const ( + CM_GET_DEVICE_INTERFACE_LIST_PRESENT = 0 // only currently 'live' device interfaces + CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1 // all registered device interfaces, live or not +) + +const ( + DN_ROOT_ENUMERATED = 0x00000001 // Was enumerated by ROOT + DN_DRIVER_LOADED = 0x00000002 // Has Register_Device_Driver + DN_ENUM_LOADED = 0x00000004 // Has Register_Enumerator + DN_STARTED = 0x00000008 // Is currently configured + DN_MANUAL = 0x00000010 // Manually installed + DN_NEED_TO_ENUM = 0x00000020 // May need reenumeration + DN_NOT_FIRST_TIME = 0x00000040 // Has received a config + DN_HARDWARE_ENUM = 0x00000080 // Enum generates hardware ID + DN_LIAR = 0x00000100 // Lied about can reconfig once + DN_HAS_MARK = 0x00000200 // Not CM_Create_DevInst lately + DN_HAS_PROBLEM = 0x00000400 // Need device installer + DN_FILTERED = 0x00000800 // Is filtered + DN_MOVED = 0x00001000 // Has been moved + DN_DISABLEABLE = 0x00002000 // Can be disabled + DN_REMOVABLE = 0x00004000 // Can be removed + DN_PRIVATE_PROBLEM = 0x00008000 // Has a private problem + DN_MF_PARENT = 0x00010000 // Multi function parent + DN_MF_CHILD = 0x00020000 // Multi function child + DN_WILL_BE_REMOVED = 0x00040000 // DevInst is being removed + DN_NOT_FIRST_TIMEE = 0x00080000 // Has received a config enumerate + DN_STOP_FREE_RES = 0x00100000 // When child is stopped, free resources + DN_REBAL_CANDIDATE = 0x00200000 // Don't skip during rebalance + DN_BAD_PARTIAL = 0x00400000 // This devnode's log_confs do not have same resources + DN_NT_ENUMERATOR = 0x00800000 // This devnode's is an NT enumerator + DN_NT_DRIVER = 0x01000000 // This devnode's is an NT driver + DN_NEEDS_LOCKING = 0x02000000 // Devnode need lock resume processing + DN_ARM_WAKEUP = 0x04000000 // Devnode can be the wakeup device + DN_APM_ENUMERATOR = 0x08000000 // APM aware enumerator + DN_APM_DRIVER = 0x10000000 // APM aware driver + DN_SILENT_INSTALL = 0x20000000 // Silent install + DN_NO_SHOW_IN_DM = 0x40000000 // No show in device manager + DN_BOOT_LOG_PROB = 0x80000000 // Had a problem during preassignment of boot log conf + DN_NEED_RESTART = DN_LIAR // System needs to be restarted for this Devnode to work properly + DN_DRIVER_BLOCKED = DN_NOT_FIRST_TIME // One or more drivers are blocked from loading for this Devnode + DN_LEGACY_DRIVER = DN_MOVED // This device is using a legacy driver + DN_CHILD_WITH_INVALID_ID = DN_HAS_MARK // One or more children have invalid IDs + DN_DEVICE_DISCONNECTED = DN_NEEDS_LOCKING // The function driver for a device reported that the device is not connected. Typically this means a wireless device is out of range. 
+ DN_QUERY_REMOVE_PENDING = DN_MF_PARENT // Device is part of a set of related devices collectively pending query-removal + DN_QUERY_REMOVE_ACTIVE = DN_MF_CHILD // Device is actively engaged in a query-remove IRP + DN_CHANGEABLE_FLAGS = DN_NOT_FIRST_TIME | DN_HARDWARE_ENUM | DN_HAS_MARK | DN_DISABLEABLE | DN_REMOVABLE | DN_MF_CHILD | DN_MF_PARENT | DN_NOT_FIRST_TIMEE | DN_STOP_FREE_RES | DN_REBAL_CANDIDATE | DN_NT_ENUMERATOR | DN_NT_DRIVER | DN_SILENT_INSTALL | DN_NO_SHOW_IN_DM +) + +//sys setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW + +// SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class. +func SetupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) { + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0) +} + +//sys setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW + +// SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) { + data := &DevInfoListDetailData{} + data.size = data.unsafeSizeOf() + + return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data) +} + +// DeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func (deviceInfoSet DevInfo) DeviceInfoListDetail() (*DevInfoListDetailData, error) { + return SetupDiGetDeviceInfoListDetail(deviceInfoSet) +} + +//sys setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW + +// SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set. +func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) { + deviceNameUTF16, err := UTF16PtrFromString(deviceName) + if err != nil { + return + } + + var deviceDescriptionUTF16 *uint16 + if deviceDescription != "" { + deviceDescriptionUTF16, err = UTF16PtrFromString(deviceDescription) + if err != nil { + return + } + } + + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data) +} + +// CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set. 
+func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) { + return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags) +} + +//sys setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo + +// SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set. +func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data) +} + +// EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set. +func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) { + return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex) +} + +// SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory. +//sys SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList + +// Close method deletes a device information set and frees all associated memory. +func (deviceInfoSet DevInfo) Close() error { + return SetupDiDestroyDeviceInfoList(deviceInfoSet) +} + +//sys SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList + +// BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set. +func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch + +// CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread. +func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error { + return SetupDiCancelDriverInfoSearch(deviceInfoSet) +} + +//sys setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW + +// SetupDiEnumDriverInfo function enumerates the members of a driver list. +func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data) +} + +// EnumDriverInfo method enumerates the members of a driver list. 
+func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex) +} + +//sys setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW + +// SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element. +func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data) +} + +// SelectedDriver method retrieves the selected driver for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) { + return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData) +} + +//sys SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW + +// SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set. +func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error { + return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW + +// SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set. +func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + reqSize := uint32(2048) + for { + buf := make([]byte, reqSize) + data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) + data.size = data.unsafeSizeOf() + err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + data.size = reqSize + return data, nil + } +} + +// DriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set. +func (deviceInfoSet DevInfo) DriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList + +// DestroyDriverInfoList method deletes a driver list. 
+func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiGetClassDevsExW + +// SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer. +func SetupDiGetClassDevsEx(classGUID *GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) { + var enumeratorUTF16 *uint16 + if enumerator != "" { + enumeratorUTF16, err = UTF16PtrFromString(enumerator) + if err != nil { + return + } + } + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0) +} + +// SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +//sys SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller + +// CallClassInstaller member calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error { + return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData) +} + +// SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information. +//sys SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) [failretval==InvalidHandle] = setupapi.SetupDiOpenDevRegKey + +// OpenDevRegKey method opens a registry key for device-specific configuration information. +func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (Handle, error) { + return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired) +} + +//sys setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) = setupapi.SetupDiGetDevicePropertyW + +// SetupDiGetDeviceProperty function retrieves a specified device instance property. 
+func SetupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType DEVPROPTYPE + buf := make([]byte, reqSize) + err = setupDiGetDeviceProperty(deviceInfoSet, deviceInfoData, propertyKey, &dataType, &buf[0], uint32(len(buf)), &reqSize, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + switch dataType { + case DEVPROP_TYPE_STRING: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + } + return nil, errors.New("unimplemented property type") + } +} + +//sys setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW + +// SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property. +func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType uint32 + buf := make([]byte, reqSize) + err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + return getRegistryValue(buf[:reqSize], dataType) + } +} + +func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) { + switch dataType { + case REG_SZ: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + case REG_EXPAND_SZ: + value := UTF16ToString(bufToUTF16(buf)) + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + ret := make([]uint16, 100) + for { + n, err := ExpandEnvironmentStrings(p, &ret[0], uint32(len(ret))) + if err != nil { + return "", err + } + if n <= uint32(len(ret)) { + return UTF16ToString(ret[:n]), nil + } + ret = make([]uint16, n) + } + case REG_BINARY: + return buf, nil + case REG_DWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint32(buf), nil + case REG_DWORD_BIG_ENDIAN: + return binary.BigEndian.Uint32(buf), nil + case REG_MULTI_SZ: + bufW := bufToUTF16(buf) + a := []string{} + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + runtime.KeepAlive(buf) + return a, nil + case REG_QWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint64(buf), nil + default: + return nil, fmt.Errorf("Unsupported registry value type: %v", dataType) + } +} + +// bufToUTF16 function reinterprets []byte buffer as []uint16 +func bufToUTF16(buf []byte) []uint16 { + sl := struct { + addr *uint16 + len int + cap int + }{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// utf16ToBuf function reinterprets []uint16 as []byte +func utf16ToBuf(buf []uint16) []byte { + sl := struct { + addr *byte + len int + cap int + }{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2} + return *(*[]byte)(unsafe.Pointer(&sl)) +} + +func wcslen(str []uint16) int { + for i := 0; i < len(str); i++ { + if str[i] == 0 { + return i + } + } + return len(str) +} + +// DeviceRegistryProperty method retrieves a specified Plug and Play device property. 
+func (deviceInfoSet DevInfo) DeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) { + return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property) +} + +//sys setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW + +// SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers))) +} + +// SetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers) +} + +// SetDeviceRegistryPropertyString method sets a Plug and Play device property string for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryPropertyString(deviceInfoData *DevInfoData, property SPDRP, str string) error { + str16, err := UTF16FromString(str) + if err != nil { + return err + } + err = SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, utf16ToBuf(append(str16, 0))) + runtime.KeepAlive(str16) + return err +} + +//sys setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW + +// SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element. +func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) { + params := &DevInstallParams{} + params.size = uint32(unsafe.Sizeof(*params)) + + return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params) +} + +// DeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) DeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) { + return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData) +} + +//sys setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceInstanceIdW + +// SetupDiGetDeviceInstanceId function retrieves the instance ID of the device. +func SetupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (string, error) { + reqSize := uint32(1024) + for { + buf := make([]uint16, reqSize) + err := setupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return "", err + } + return UTF16ToString(buf), nil + } +} + +// DeviceInstanceID method retrieves the instance ID of the device. 
+func (deviceInfoSet DevInfo) DeviceInstanceID(deviceInfoData *DevInfoData) (string, error) { + return SetupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData) +} + +// SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element. +//sys SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW + +// ClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) ClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error { + return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize) +} + +//sys SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW + +// SetDeviceInstallParams member sets device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error { + return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams) +} + +// SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element. +//sys SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW + +// SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error { + return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize) +} + +//sys setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW + +// SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer. +func SetupDiClassNameFromGuidEx(classGUID *GUID, machineName string) (className string, err error) { + var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16 + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + + err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0) + if err != nil { + return + } + + className = UTF16ToString(classNameUTF16[:]) + return +} + +//sys setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW + +// SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. 
This resulting list contains the classes currently installed on a local or remote computer. +func SetupDiClassGuidsFromNameEx(className string, machineName string) ([]GUID, error) { + classNameUTF16, err := UTF16PtrFromString(className) + if err != nil { + return nil, err + } + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return nil, err + } + } + + reqSize := uint32(4) + for { + buf := make([]GUID, reqSize) + err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], uint32(len(buf)), &reqSize, machineNameUTF16, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + return buf[:reqSize], nil + } +} + +//sys setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice + +// SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set. +func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDevice(deviceInfoSet, data) +} + +// SelectedDevice method retrieves the selected device information element in a device information set. +func (deviceInfoSet DevInfo) SelectedDevice() (*DevInfoData, error) { + return SetupDiGetSelectedDevice(deviceInfoSet) +} + +// SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +//sys SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice + +// SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error { + return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData) +} + +//sys setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) = setupapi.SetupUninstallOEMInfW + +// SetupUninstallOEMInf uninstalls the specified driver. 
+func SetupUninstallOEMInf(infFileName string, flags SUOI) error { + infFileName16, err := UTF16PtrFromString(infFileName) + if err != nil { + return err + } + return setupUninstallOEMInf(infFileName16, flags, 0) +} + +//sys cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) = CfgMgr32.CM_MapCrToWin32Err + +//sys cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_List_SizeW +//sys cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_ListW + +func CM_Get_Device_Interface_List(deviceID string, interfaceClass *GUID, flags uint32) ([]string, error) { + deviceID16, err := UTF16PtrFromString(deviceID) + if err != nil { + return nil, err + } + var buf []uint16 + var buflen uint32 + for { + if ret := cm_Get_Device_Interface_List_Size(&buflen, interfaceClass, deviceID16, flags); ret != CR_SUCCESS { + return nil, ret + } + buf = make([]uint16, buflen) + if ret := cm_Get_Device_Interface_List(interfaceClass, deviceID16, &buf[0], buflen, flags); ret == CR_SUCCESS { + break + } else if ret != CR_BUFFER_SMALL { + return nil, ret + } + } + var interfaces []string + for i := 0; i < len(buf); { + j := i + wcslen(buf[i:]) + if i < j { + interfaces = append(interfaces, UTF16ToString(buf[i:j])) + } + i = j + 1 + } + if interfaces == nil { + return nil, ERROR_NO_SUCH_DEVICE_INTERFACE + } + return interfaces, nil +} + +//sys cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_DevNode_Status + +func CM_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) error { + ret := cm_Get_DevNode_Status(status, problemNumber, devInst, flags) + if ret == CR_SUCCESS { + return nil + } + return ret +} diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go deleted file mode 100644 index 1681810e0..000000000 --- a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package windows - -import "syscall" - -const ( - ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 - ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 - ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 - ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 - ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 - ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 - ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 - ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 - ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 - ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 - ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 - ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 - ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 - ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 - ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 - ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 - ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 - ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 - ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A - ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B - ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C - ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D - ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E - ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F - ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 - ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211 - ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 - ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 - ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 - ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 - ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 - ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 - ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 - ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 - ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A - ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B - ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C - ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D - ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E - ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F - ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 - ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 - ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 - ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 - ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224 - ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 - ERROR_DI_POSTPROCESSING_REQUIRED 
syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 - ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 - ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 - ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 - ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A - ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B - ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C - ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D - ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E - ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F - ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 - ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 - ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 - ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 - ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 - ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 - ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 - ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 - ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 - ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A - ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B - ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C - ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D - ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E - ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F - ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 - ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 - ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 - ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 - ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 - ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 - ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 - ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 - ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 - ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 - ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A - ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B - ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C - ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 - EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW - ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE - ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE - ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED - ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE -) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go 
b/vendor/golang.org/x/sys/windows/syscall_windows.go index 42b494555..200b62a00 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -248,6 +248,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW //sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 @@ -322,6 +323,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW +//sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW //sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW //sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW //sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) @@ -423,6 +426,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString //sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile //sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) = ntdll.NtSetInformationFile //sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus //sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus //sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl @@ -892,9 +896,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) 
- for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } @@ -914,9 +916,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } @@ -989,9 +989,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -1000,9 +998,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, syscall.EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 53e215227..bb31abda4 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -156,6 +156,8 @@ const ( MAX_PATH = 260 MAX_LONG_PATH = 32768 + MAX_MODULE_NAME32 = 255 + MAX_COMPUTERNAME_LENGTH = 15 TIME_ZONE_ID_UNKNOWN = 0 @@ -936,8 +938,8 @@ type StartupInfoEx struct { type ProcThreadAttributeList struct{} type ProcThreadAttributeListContainer struct { - data *ProcThreadAttributeList - heapAllocations []uintptr + data *ProcThreadAttributeList + pointers []unsafe.Pointer } type ProcessInformation struct { @@ -970,6 +972,21 @@ type ThreadEntry32 struct { Flags uint32 } +type ModuleEntry32 struct { + Size uint32 + ModuleID uint32 + ProcessID uint32 + GlblcntUsage uint32 + ProccntUsage uint32 + ModBaseAddr uintptr + ModBaseSize uint32 + ModuleHandle Handle + Module [MAX_MODULE_NAME32 + 1]uint16 + ExePath [MAX_PATH]uint16 +} + +const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{}) + type Systemtime struct { Year uint16 Month uint16 @@ -2562,6 +2579,60 @@ const ( FILE_PIPE_SERVER_END = 0x00000001 ) +const ( + // FileInformationClass for NtSetInformationFile + FileBasicInformation = 4 + FileRenameInformation = 10 + FileDispositionInformation = 13 + FilePositionInformation = 14 + FileEndOfFileInformation = 20 + FileValidDataLengthInformation = 39 + FileShortNameInformation = 40 + FileIoPriorityHintInformation = 43 + FileReplaceCompletionInformation = 61 + FileDispositionInformationEx = 64 + FileCaseSensitiveInformation = 71 + FileLinkInformation = 72 + FileCaseSensitiveInformationForceAccessCheck = 75 + FileKnownFolderInformation = 76 + + // Flags for FILE_RENAME_INFORMATION + FILE_RENAME_REPLACE_IF_EXISTS = 0x00000001 + FILE_RENAME_POSIX_SEMANTICS = 0x00000002 + FILE_RENAME_SUPPRESS_PIN_STATE_INHERITANCE = 0x00000004 + FILE_RENAME_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 + FILE_RENAME_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 + FILE_RENAME_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 + FILE_RENAME_PRESERVE_AVAILABLE_SPACE = 0x00000030 + FILE_RENAME_IGNORE_READONLY_ATTRIBUTE = 0x00000040 + FILE_RENAME_FORCE_RESIZE_TARGET_SR = 0x00000080 + FILE_RENAME_FORCE_RESIZE_SOURCE_SR = 0x00000100 + FILE_RENAME_FORCE_RESIZE_SR = 0x00000180 + + // Flags for FILE_DISPOSITION_INFORMATION_EX + FILE_DISPOSITION_DO_NOT_DELETE = 0x00000000 + 
FILE_DISPOSITION_DELETE = 0x00000001 + FILE_DISPOSITION_POSIX_SEMANTICS = 0x00000002 + FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK = 0x00000004 + FILE_DISPOSITION_ON_CLOSE = 0x00000008 + FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE = 0x00000010 + + // Flags for FILE_CASE_SENSITIVE_INFORMATION + FILE_CS_FLAG_CASE_SENSITIVE_DIR = 0x00000001 + + // Flags for FILE_LINK_INFORMATION + FILE_LINK_REPLACE_IF_EXISTS = 0x00000001 + FILE_LINK_POSIX_SEMANTICS = 0x00000002 + FILE_LINK_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 + FILE_LINK_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 + FILE_LINK_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 + FILE_LINK_PRESERVE_AVAILABLE_SPACE = 0x00000030 + FILE_LINK_IGNORE_READONLY_ATTRIBUTE = 0x00000040 + FILE_LINK_FORCE_RESIZE_TARGET_SR = 0x00000080 + FILE_LINK_FORCE_RESIZE_SOURCE_SR = 0x00000100 + FILE_LINK_FORCE_RESIZE_SR = 0x00000180 +) + // ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess. const ( ProcessBasicInformation = iota @@ -2678,6 +2749,43 @@ type PROCESS_BASIC_INFORMATION struct { InheritedFromUniqueProcessId uintptr } +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 + NumberOfThreads uint32 + WorkingSetPrivateSize int64 + HardFaultCount uint32 + NumberOfThreadsHighWatermark uint32 + CycleTime uint64 + CreateTime int64 + UserTime int64 + KernelTime int64 + ImageName NTUnicodeString + BasePriority int32 + UniqueProcessID uintptr + InheritedFromUniqueProcessID uintptr + HandleCount uint32 + SessionID uint32 + UniqueProcessKey *uint32 + PeakVirtualSize uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivatePageCount uintptr + ReadOperationCount int64 + WriteOperationCount int64 + OtherOperationCount int64 + ReadTransferCount int64 + WriteTransferCount int64 + OtherTransferCount int64 +} + // SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation const ( SystemBasicInformation = iota diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 42e10dd38..1055d47ed 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -36,6 +36,7 @@ func errnoErr(e syscall.Errno) error { } var ( + modCfgMgr32 = NewLazySystemDLL("CfgMgr32.dll") modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") @@ -48,6 +49,7 @@ var ( modpsapi = NewLazySystemDLL("psapi.dll") modsechost = NewLazySystemDLL("sechost.dll") modsecur32 = NewLazySystemDLL("secur32.dll") + modsetupapi = NewLazySystemDLL("setupapi.dll") modshell32 = NewLazySystemDLL("shell32.dll") moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") @@ -56,6 +58,10 @@ var ( modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + procCM_Get_DevNode_Status = modCfgMgr32.NewProc("CM_Get_DevNode_Status") + procCM_Get_Device_Interface_ListW = modCfgMgr32.NewProc("CM_Get_Device_Interface_ListW") + procCM_Get_Device_Interface_List_SizeW = modCfgMgr32.NewProc("CM_Get_Device_Interface_List_SizeW") + procCM_MapCrToWin32Err = modCfgMgr32.NewProc("CM_MapCrToWin32Err") procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") 
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") @@ -115,6 +121,7 @@ var ( procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceDynamicInformation = modadvapi32.NewProc("QueryServiceDynamicInformation") procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") @@ -198,6 +205,7 @@ var ( procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procExitProcess = modkernel32.NewProc("ExitProcess") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") @@ -287,6 +295,8 @@ var ( procLockFileEx = modkernel32.NewProc("LockFileEx") procLockResource = modkernel32.NewProc("LockResource") procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procModule32FirstW = modkernel32.NewProc("Module32FirstW") + procModule32NextW = modkernel32.NewProc("Module32NextW") procMoveFileExW = modkernel32.NewProc("MoveFileExW") procMoveFileW = modkernel32.NewProc("MoveFileW") procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") @@ -368,6 +378,7 @@ var ( procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") procNtSetInformationProcess = modntdll.NewProc("NtSetInformationProcess") procNtSetSystemInformation = modntdll.NewProc("NtSetSystemInformation") procRtlAddFunctionTable = modntdll.NewProc("RtlAddFunctionTable") @@ -398,6 +409,34 @@ var ( procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procSetupDiBuildDriverInfoList = modsetupapi.NewProc("SetupDiBuildDriverInfoList") + procSetupDiCallClassInstaller = modsetupapi.NewProc("SetupDiCallClassInstaller") + procSetupDiCancelDriverInfoSearch = modsetupapi.NewProc("SetupDiCancelDriverInfoSearch") + procSetupDiClassGuidsFromNameExW = modsetupapi.NewProc("SetupDiClassGuidsFromNameExW") + procSetupDiClassNameFromGuidExW = modsetupapi.NewProc("SetupDiClassNameFromGuidExW") + procSetupDiCreateDeviceInfoListExW = modsetupapi.NewProc("SetupDiCreateDeviceInfoListExW") + procSetupDiCreateDeviceInfoW = modsetupapi.NewProc("SetupDiCreateDeviceInfoW") + procSetupDiDestroyDeviceInfoList = modsetupapi.NewProc("SetupDiDestroyDeviceInfoList") + procSetupDiDestroyDriverInfoList = modsetupapi.NewProc("SetupDiDestroyDriverInfoList") + procSetupDiEnumDeviceInfo = modsetupapi.NewProc("SetupDiEnumDeviceInfo") + procSetupDiEnumDriverInfoW = modsetupapi.NewProc("SetupDiEnumDriverInfoW") + procSetupDiGetClassDevsExW = modsetupapi.NewProc("SetupDiGetClassDevsExW") + procSetupDiGetClassInstallParamsW = 
modsetupapi.NewProc("SetupDiGetClassInstallParamsW") + procSetupDiGetDeviceInfoListDetailW = modsetupapi.NewProc("SetupDiGetDeviceInfoListDetailW") + procSetupDiGetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiGetDeviceInstallParamsW") + procSetupDiGetDeviceInstanceIdW = modsetupapi.NewProc("SetupDiGetDeviceInstanceIdW") + procSetupDiGetDevicePropertyW = modsetupapi.NewProc("SetupDiGetDevicePropertyW") + procSetupDiGetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiGetDeviceRegistryPropertyW") + procSetupDiGetDriverInfoDetailW = modsetupapi.NewProc("SetupDiGetDriverInfoDetailW") + procSetupDiGetSelectedDevice = modsetupapi.NewProc("SetupDiGetSelectedDevice") + procSetupDiGetSelectedDriverW = modsetupapi.NewProc("SetupDiGetSelectedDriverW") + procSetupDiOpenDevRegKey = modsetupapi.NewProc("SetupDiOpenDevRegKey") + procSetupDiSetClassInstallParamsW = modsetupapi.NewProc("SetupDiSetClassInstallParamsW") + procSetupDiSetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiSetDeviceInstallParamsW") + procSetupDiSetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiSetDeviceRegistryPropertyW") + procSetupDiSetSelectedDevice = modsetupapi.NewProc("SetupDiSetSelectedDevice") + procSetupDiSetSelectedDriverW = modsetupapi.NewProc("SetupDiSetSelectedDriverW") + procSetupUninstallOEMInfW = modsetupapi.NewProc("SetupUninstallOEMInfW") procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") @@ -445,6 +484,30 @@ var ( procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") ) +func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { + r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + ret = Errno(r0) + return +} + func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { var _p0 uint32 if resetToDefault { @@ -975,6 +1038,18 @@ func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, buf return } +func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) { + err = procQueryServiceDynamicInformation.Find() + if err != nil { + return + } + r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, 
uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) if r1 == 0 { @@ -1702,6 +1777,15 @@ func ExitProcess(exitcode uint32) { return } +func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + func FindClose(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -2485,6 +2569,22 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui return } +func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { @@ -3186,6 +3286,14 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf return } +func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) if r0 != 0 { @@ -3410,6 +3518,233 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint return } +func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 
0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + 
r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + key = Handle(r0) + if key == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { + r1, _, e1 := 
syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) diff --git a/vendor/golang.org/x/term/go.mod b/vendor/golang.org/x/term/go.mod deleted file mode 100644 index d45f52851..000000000 --- a/vendor/golang.org/x/term/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module golang.org/x/term - -go 1.11 - -require golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 diff --git a/vendor/golang.org/x/term/go.sum b/vendor/golang.org/x/term/go.sum deleted file mode 100644 index de9e09c65..000000000 --- a/vendor/golang.org/x/term/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 563f70429..a98fe7782 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -53,10 +53,9 @@ func Every(interval time.Duration) Limit { // // The methods AllowN, ReserveN, and WaitN consume n tokens. type Limiter struct { - limit Limit - burst int - mu sync.Mutex + limit Limit + burst int tokens float64 // last is the last time the limiter's tokens field was updated last time.Time @@ -76,6 +75,8 @@ func (lim *Limiter) Limit() Limit { // Burst values allow more events to happen at once. // A zero Burst allows no events, unless limit == Inf. func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() return lim.burst } @@ -196,7 +197,7 @@ func (lim *Limiter) Reserve() *Reservation { // ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. // The Limiter takes this Reservation into account when allowing future events. -// ReserveN returns false if n exceeds the Limiter's burst size. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: // r := lim.ReserveN(time.Now(), 1) // if !r.OK() { @@ -229,7 +230,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { lim.mu.Unlock() if n > burst && limit != Inf { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) } // Check if ctx is already cancelled select { @@ -359,6 +360,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. +// advance requires that lim.mu is held. 
func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { last := lim.last if now.Before(last) { diff --git a/vendor/golang.zx2c4.com/wireguard/wgctrl/go.mod b/vendor/golang.zx2c4.com/wireguard/wgctrl/go.mod deleted file mode 100644 index 4c04d6416..000000000 --- a/vendor/golang.zx2c4.com/wireguard/wgctrl/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module golang.zx2c4.com/wireguard/wgctrl - -go 1.13 - -require ( - github.com/google/go-cmp v0.5.6 - github.com/mdlayher/genetlink v1.0.0 - github.com/mdlayher/netlink v1.4.1 - github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 - golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 - golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 - golang.zx2c4.com/wireguard v0.0.0-20210927201915-bb745b2ea326 -) diff --git a/vendor/golang.zx2c4.com/wireguard/wgctrl/go.sum b/vendor/golang.zx2c4.com/wireguard/wgctrl/go.sum deleted file mode 100644 index b0594c7ff..000000000 --- a/vendor/golang.zx2c4.com/wireguard/wgctrl/go.sum +++ /dev/null @@ -1,98 +0,0 @@ -github.com/cilium/ebpf v0.5.0 h1:E1KshmrMEtkMP2UjlWzfmUV1owWY+BnbL5FxxuatnrU= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 h1:uhL5Gw7BINiiPAo24A2sxkcDI0Jt/sqp1v5xQCniEFA= -github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= -github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= -github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= -github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= -github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= -github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190 h1:iycCSDo8EKVueI9sfVBBJmtNn9DnXV/K1YWwEJO+uOs= -github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= -github.com/kr/pretty v0.2.1 
h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= -github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= -github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= -github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= -github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= -github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= -github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= -github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= -github.com/mdlayher/netlink v1.4.1 h1:I154BCU+mKlIf7BgcAJB2r7QjveNPty6uNY1g9ChVfI= -github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= -github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 h1:qEtkL8n1DAHpi5/AOgAckwGQUlMe4+jhL/GMt+GKIks= -github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= -github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= -github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210927181540-4e4d966f7476 h1:s5hu7bTnLKswvidgtqc4GwsW83m9LZu8UAqzmWOZtI4= -golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 h1:foEbQz/B0Oz6YIqu/69kfXPYeFQAuuMYFkjaqXzl5Wo= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard v0.0.0-20210927201915-bb745b2ea326 h1:4yQQ5d6U5ozGB6n/WSDZa6B0XpPTmoQMtMDMoiZr4n0= -golang.zx2c4.com/wireguard v0.0.0-20210927201915-bb745b2ea326/go.mod h1:SDoazCvdy7RDjBPNEMBwrXhomlmtG7svs8mgwWEqtVI= diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 386aaa3bf..ce1183e91 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"F5McR9eEaw0XRpaO3M9gbIugkbs/SaxEFFt9Jrfs1w316mmn35GXCN4\"", + "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/8D3WWVngRoS0y9CFVIt9M0Wvgus\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -89,7 +89,7 @@ "acceleratorTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of accelerator types. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of accelerator types.", "httpMethod": "GET", "id": "compute.acceleratorTypes.aggregatedList", "parameterOrder": [ @@ -97,25 +97,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -138,7 +143,7 @@ ] }, "get": { - "description": "Returns the specified accelerator type. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified accelerator type.", "httpMethod": "GET", "id": "compute.acceleratorTypes.get", "parameterOrder": [ @@ -180,7 +185,7 @@ ] }, "list": { - "description": "Retrieves a list of accelerator types available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of accelerator types available to the specified project.", "httpMethod": "GET", "id": "compute.acceleratorTypes.list", "parameterOrder": [ @@ -189,25 +194,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -241,7 +246,7 @@ "addresses": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of addresses. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of addresses.", "httpMethod": "GET", "id": "compute.addresses.aggregatedList", "parameterOrder": [ @@ -249,25 +254,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -290,7 +300,7 @@ ] }, "delete": { - "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified address resource.", "httpMethod": "DELETE", "id": "compute.addresses.delete", "parameterOrder": [ @@ -336,7 +346,7 @@ ] }, "get": { - "description": "Returns the specified address resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified address resource.", "httpMethod": "GET", "id": "compute.addresses.get", "parameterOrder": [ @@ -378,7 +388,7 @@ ] }, "insert": { - "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an address resource in the specified project by using the data included in the request.", "httpMethod": "POST", "id": "compute.addresses.insert", "parameterOrder": [ @@ -419,7 +429,7 @@ ] }, "list": { - "description": "Retrieves a list of addresses contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of addresses contained within the specified region.", "httpMethod": "GET", "id": "compute.addresses.list", "parameterOrder": [ @@ -428,25 +438,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -480,7 +490,7 @@ "autoscalers": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of autoscalers. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of autoscalers.", "httpMethod": "GET", "id": "compute.autoscalers.aggregatedList", "parameterOrder": [ @@ -488,25 +498,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -529,7 +544,7 @@ ] }, "delete": { - "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified autoscaler.", "httpMethod": "DELETE", "id": "compute.autoscalers.delete", "parameterOrder": [ @@ -575,7 +590,7 @@ ] }, "get": { - "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", "httpMethod": "GET", "id": "compute.autoscalers.get", "parameterOrder": [ @@ -617,7 +632,7 @@ ] }, "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an autoscaler in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.autoscalers.insert", "parameterOrder": [ @@ -658,7 +673,7 @@ ] }, "list": { - "description": "Retrieves a list of autoscalers contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of autoscalers contained within the specified zone.", "httpMethod": "GET", "id": "compute.autoscalers.list", "parameterOrder": [ @@ -667,25 +682,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -715,7 +730,7 @@ ] }, "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.autoscalers.patch", "parameterOrder": [ @@ -762,7 +777,7 @@ ] }, "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates an autoscaler in the specified project using the data included in the request.", "httpMethod": "PUT", "id": "compute.autoscalers.update", "parameterOrder": [ @@ -813,7 +828,7 @@ "backendBuckets": { "methods": { "addSignedUrlKey": { - "description": "Adds a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds a key for validating requests with signed URLs for this backend bucket.", "httpMethod": "POST", "id": "compute.backendBuckets.addSignedUrlKey", "parameterOrder": [ @@ -853,7 +868,7 @@ ] }, "delete": { - "description": "Deletes the specified BackendBucket resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified BackendBucket resource.", "httpMethod": "DELETE", "id": "compute.backendBuckets.delete", "parameterOrder": [ @@ -891,7 +906,7 @@ ] }, "deleteSignedUrlKey": { - "description": "Deletes a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", "httpMethod": "POST", "id": "compute.backendBuckets.deleteSignedUrlKey", "parameterOrder": [ @@ -935,7 +950,7 @@ ] }, "get": { - "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified BackendBucket resource. 
Gets a list of available backend buckets by making a list() request.", "httpMethod": "GET", "id": "compute.backendBuckets.get", "parameterOrder": [ @@ -969,7 +984,7 @@ ] }, "insert": { - "description": "Creates a BackendBucket resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.backendBuckets.insert", "parameterOrder": [ @@ -1002,7 +1017,7 @@ ] }, "list": { - "description": "Retrieves the list of BackendBucket resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of BackendBucket resources available to the specified project.", "httpMethod": "GET", "id": "compute.backendBuckets.list", "parameterOrder": [ @@ -1010,25 +1025,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -1051,7 +1066,7 @@ ] }, "patch": { - "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.backendBuckets.patch", "parameterOrder": [ @@ -1092,7 +1107,7 @@ ] }, "update": { - "description": "Updates the specified BackendBucket resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified BackendBucket resource with the data included in the request.", "httpMethod": "PUT", "id": "compute.backendBuckets.update", "parameterOrder": [ @@ -1137,7 +1152,7 @@ "backendServices": { "methods": { "addSignedUrlKey": { - "description": "Adds a key for validating requests with signed URLs for this backend service. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Adds a key for validating requests with signed URLs for this backend service.", "httpMethod": "POST", "id": "compute.backendServices.addSignedUrlKey", "parameterOrder": [ @@ -1177,7 +1192,7 @@ ] }, "aggregatedList": { - "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", "httpMethod": "GET", "id": "compute.backendServices.aggregatedList", "parameterOrder": [ @@ -1185,25 +1200,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -1226,7 +1246,7 @@ ] }, "delete": { - "description": "Deletes the specified BackendService resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified BackendService resource.", "httpMethod": "DELETE", "id": "compute.backendServices.delete", "parameterOrder": [ @@ -1264,7 +1284,7 @@ ] }, "deleteSignedUrlKey": { - "description": "Deletes a key for validating requests with signed URLs for this backend service. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes a key for validating requests with signed URLs for this backend service.", "httpMethod": "POST", "id": "compute.backendServices.deleteSignedUrlKey", "parameterOrder": [ @@ -1308,7 +1328,7 @@ ] }, "get": { - "description": "Returns the specified BackendService resource. Gets a list of available backend services. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified BackendService resource. 
Gets a list of available backend services.", "httpMethod": "GET", "id": "compute.backendServices.get", "parameterOrder": [ @@ -1342,7 +1362,7 @@ ] }, "getHealth": { - "description": "Gets the most recent health check results for this BackendService. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the most recent health check results for this BackendService.", "httpMethod": "POST", "id": "compute.backendServices.getHealth", "parameterOrder": [ @@ -1378,7 +1398,7 @@ ] }, "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Understanding backend services for more information.", "httpMethod": "POST", "id": "compute.backendServices.insert", "parameterOrder": [ @@ -1411,7 +1431,7 @@ ] }, "list": { - "description": "Retrieves the list of BackendService resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of BackendService resources available to the specified project.", "httpMethod": "GET", "id": "compute.backendServices.list", "parameterOrder": [ @@ -1419,25 +1439,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -1460,7 +1480,7 @@ ] }, "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.backendServices.patch", "parameterOrder": [ @@ -1501,7 +1521,7 @@ ] }, "setSecurityPolicy": { - "description": "Sets the security policy for the specified backend service. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the security policy for the specified backend service.", "httpMethod": "POST", "id": "compute.backendServices.setSecurityPolicy", "parameterOrder": [ @@ -1541,7 +1561,7 @@ ] }, "update": { - "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", "httpMethod": "PUT", "id": "compute.backendServices.update", "parameterOrder": [ @@ -1586,7 +1606,7 @@ "diskTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of disk types. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of disk types.", "httpMethod": "GET", "id": "compute.diskTypes.aggregatedList", "parameterOrder": [ @@ -1594,25 +1614,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -1635,7 +1660,7 @@ ] }, "get": { - "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", "httpMethod": "GET", "id": "compute.diskTypes.get", "parameterOrder": [ @@ -1677,7 +1702,7 @@ ] }, "list": { - "description": "Retrieves a list of disk types available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of disk types available to the specified project.", "httpMethod": "GET", "id": "compute.diskTypes.list", "parameterOrder": [ @@ -1686,25 +1711,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -1738,7 +1763,7 @@ "disks": { "methods": { "addResourcePolicies": { - "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", "httpMethod": "POST", "id": "compute.disks.addResourcePolicies", "parameterOrder": [ @@ -1787,7 +1812,7 @@ ] }, "aggregatedList": { - "description": "Retrieves an aggregated list of persistent disks. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of persistent disks.", "httpMethod": "GET", "id": "compute.disks.aggregatedList", "parameterOrder": [ @@ -1795,25 +1820,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -1836,7 +1866,7 @@ ] }, "createSnapshot": { - "description": "Creates a snapshot of a specified persistent disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a snapshot of a specified persistent disk.", "httpMethod": "POST", "id": "compute.disks.createSnapshot", "parameterOrder": [ @@ -1890,7 +1920,7 @@ ] }, "delete": { - "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", "httpMethod": "DELETE", "id": "compute.disks.delete", "parameterOrder": [ @@ -1935,7 +1965,7 @@ ] }, "get": { - "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", "httpMethod": "GET", "id": "compute.disks.get", "parameterOrder": [ @@ -1977,7 +2007,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.disks.getIamPolicy", "parameterOrder": [ @@ -2019,7 +2049,7 @@ ] }, "insert": { - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", "httpMethod": "POST", "id": "compute.disks.insert", "parameterOrder": [ @@ -2065,7 +2095,7 @@ ] }, "list": { - "description": "Retrieves a list of persistent disks contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of persistent disks contained within the specified zone.", "httpMethod": "GET", "id": "compute.disks.list", "parameterOrder": [ @@ -2074,25 +2104,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -2122,7 +2152,7 @@ ] }, "removeResourcePolicies": { - "description": "Removes resource policies from a disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Removes resource policies from a disk.", "httpMethod": "POST", "id": "compute.disks.removeResourcePolicies", "parameterOrder": [ @@ -2171,7 +2201,7 @@ ] }, "resize": { - "description": "Resizes the specified persistent disk. You can only increase the size of the disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", "httpMethod": "POST", "id": "compute.disks.resize", "parameterOrder": [ @@ -2220,7 +2250,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.disks.setIamPolicy", "parameterOrder": [ @@ -2264,7 +2294,7 @@ ] }, "setLabels": { - "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", "id": "compute.disks.setLabels", "parameterOrder": [ @@ -2313,7 +2343,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.disks.testIamPermissions", "parameterOrder": [ @@ -2362,7 +2392,7 @@ "externalVpnGateways": { "methods": { "delete": { - "description": "Deletes the specified externalVpnGateway. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified externalVpnGateway.", "httpMethod": "DELETE", "id": "compute.externalVpnGateways.delete", "parameterOrder": [ @@ -2400,7 +2430,7 @@ ] }, "get": { - "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request.", "httpMethod": "GET", "id": "compute.externalVpnGateways.get", "parameterOrder": [ @@ -2434,7 +2464,7 @@ ] }, "insert": { - "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.externalVpnGateways.insert", "parameterOrder": [ @@ -2467,7 +2497,7 @@ ] }, "list": { - "description": "Retrieves the list of ExternalVpnGateway available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", "httpMethod": "GET", "id": "compute.externalVpnGateways.list", "parameterOrder": [ @@ -2475,25 +2505,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -2516,7 +2546,7 @@ ] }, "setLabels": { - "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", "id": "compute.externalVpnGateways.setLabels", "parameterOrder": [ @@ -2552,7 +2582,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.externalVpnGateways.testIamPermissions", "parameterOrder": [ @@ -2593,7 +2623,7 @@ "firewalls": { "methods": { "delete": { - "description": "Deletes the specified firewall. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified firewall.", "httpMethod": "DELETE", "id": "compute.firewalls.delete", "parameterOrder": [ @@ -2631,7 +2661,7 @@ ] }, "get": { - "description": "Returns the specified firewall. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified firewall.", "httpMethod": "GET", "id": "compute.firewalls.get", "parameterOrder": [ @@ -2665,7 +2695,7 @@ ] }, "insert": { - "description": "Creates a firewall rule in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a firewall rule in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.firewalls.insert", "parameterOrder": [ @@ -2698,7 +2728,7 @@ ] }, "list": { - "description": "Retrieves the list of firewall rules available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of firewall rules available to the specified project.", "httpMethod": "GET", "id": "compute.firewalls.list", "parameterOrder": [ @@ -2706,25 +2736,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -2747,7 +2777,7 @@ ] }, "patch": { - "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.firewalls.patch", "parameterOrder": [ @@ -2788,7 +2818,7 @@ ] }, "update": { - "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. To update individual fields, please use PATCH instead. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. To update individual fields, please use PATCH instead.", "httpMethod": "PUT", "id": "compute.firewalls.update", "parameterOrder": [ @@ -2833,7 +2863,7 @@ "forwardingRules": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of forwarding rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of forwarding rules.", "httpMethod": "GET", "id": "compute.forwardingRules.aggregatedList", "parameterOrder": [ @@ -2841,25 +2871,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -2882,7 +2917,7 @@ ] }, "delete": { - "description": "Deletes the specified ForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified ForwardingRule resource.", "httpMethod": "DELETE", "id": "compute.forwardingRules.delete", "parameterOrder": [ @@ -2928,7 +2963,7 @@ ] }, "get": { - "description": "Returns the specified ForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified ForwardingRule resource.", "httpMethod": "GET", "id": "compute.forwardingRules.get", "parameterOrder": [ @@ -2970,7 +3005,7 @@ ] }, "insert": { - "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.forwardingRules.insert", "parameterOrder": [ @@ -3011,7 +3046,7 @@ ] }, "list": { - "description": "Retrieves a list of ForwardingRule resources available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", "httpMethod": "GET", "id": "compute.forwardingRules.list", "parameterOrder": [ @@ -3020,25 +3055,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3067,8 +3102,57 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + "httpMethod": "PATCH", + "id": "compute.forwardingRules.patch", + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "parameters": { + "forwardingRule": { + "description": "Name of the ForwardingRule resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setTarget": { - "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", "httpMethod": "POST", "id": "compute.forwardingRules.setTarget", "parameterOrder": [ @@ -3121,7 +3205,7 @@ "globalAddresses": { "methods": { "delete": { - "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified address resource.", "httpMethod": "DELETE", "id": "compute.globalAddresses.delete", "parameterOrder": [ @@ -3159,7 +3243,7 @@ ] }, "get": { - "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", "httpMethod": "GET", "id": "compute.globalAddresses.get", "parameterOrder": [ @@ -3193,7 +3277,7 @@ ] }, "insert": { - "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an address resource in the specified project by using the data included in the request.", "httpMethod": "POST", "id": "compute.globalAddresses.insert", "parameterOrder": [ @@ -3226,7 +3310,7 @@ ] }, "list": { - "description": "Retrieves a list of global addresses. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of global addresses.", "httpMethod": "GET", "id": "compute.globalAddresses.list", "parameterOrder": [ @@ -3234,25 +3318,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3279,7 +3363,7 @@ "globalForwardingRules": { "methods": { "delete": { - "description": "Deletes the specified GlobalForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified GlobalForwardingRule resource.", "httpMethod": "DELETE", "id": "compute.globalForwardingRules.delete", "parameterOrder": [ @@ -3317,7 +3401,7 @@ ] }, "get": { - "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", "httpMethod": "GET", "id": "compute.globalForwardingRules.get", "parameterOrder": [ @@ -3351,7 +3435,7 @@ ] }, "insert": { - "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.globalForwardingRules.insert", "parameterOrder": [ @@ -3384,7 +3468,7 @@ ] }, "list": { - "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", "httpMethod": "GET", "id": "compute.globalForwardingRules.list", "parameterOrder": [ @@ -3392,25 +3476,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3432,8 +3516,49 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
Currently, you can only patch the network_tier field.", + "httpMethod": "PATCH", + "id": "compute.globalForwardingRules.patch", + "parameterOrder": [ + "project", + "forwardingRule" + ], + "parameters": { + "forwardingRule": { + "description": "Name of the ForwardingRule resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/forwardingRules/{forwardingRule}", + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setTarget": { - "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", "httpMethod": "POST", "id": "compute.globalForwardingRules.setTarget", "parameterOrder": [ @@ -3475,36 +3600,333 @@ } } }, - "globalOperations": { + "globalNetworkEndpointGroups": { "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of all operations. (== suppress_warning http-rest-shadowed ==)", + "attachNetworkEndpoints": { + "description": "Attach a network endpoint to the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.attachNetworkEndpoints", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + "request": { + "$ref": "GlobalNetworkEndpointGroupsAttachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "description": "Deletes the specified network endpoint group.Note that the NEG cannot be deleted if there are backend services referencing it.", + "httpMethod": "DELETE", + "id": "compute.globalNetworkEndpointGroups.delete", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "detachNetworkEndpoints": { + "description": "Detach the network endpoint from the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.detachNetworkEndpoints", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "request": { + "$ref": "GlobalNetworkEndpointGroupsDetachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", "httpMethod": "GET", - "id": "compute.globalOperations.aggregatedList", + "id": "compute.globalNetworkEndpointGroups.get", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "NetworkEndpointGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of network endpoint groups that are located in the specified project.", + "httpMethod": "GET", + "id": "compute.globalNetworkEndpointGroups.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups", + "response": { + "$ref": "NetworkEndpointGroupList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listNetworkEndpoints": { + "description": "Lists the network endpoints in the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.listNetworkEndpoints", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "networkEndpointGroup": { + "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "response": { + "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "globalOperations": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of all operations.", + "httpMethod": "GET", + "id": "compute.globalOperations.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3527,7 +3949,7 @@ ] }, "delete": { - "description": "Deletes the specified Operations resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified Operations resource.", "httpMethod": "DELETE", "id": "compute.globalOperations.delete", "parameterOrder": [ @@ -3557,7 +3979,7 @@ ] }, "get": { - "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the specified Operations resource. Gets a list of operations by making a `list()` request.", "httpMethod": "GET", "id": "compute.globalOperations.get", "parameterOrder": [ @@ -3591,7 +4013,7 @@ ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of Operation resources contained within the specified project.", "httpMethod": "GET", "id": "compute.globalOperations.list", "parameterOrder": [ @@ -3599,25 +4021,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3638,13 +4060,47 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", + "httpMethod": "POST", + "id": "compute.globalOperations.wait", + "parameterOrder": [ + "project", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/operations/{operation}/wait", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, "healthChecks": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", "httpMethod": "GET", "id": "compute.healthChecks.aggregatedList", "parameterOrder": [ @@ -3652,25 +4108,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3693,7 +4154,7 @@ ] }, "delete": { - "description": "Deletes the specified HealthCheck resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified HealthCheck resource.", "httpMethod": "DELETE", "id": "compute.healthChecks.delete", "parameterOrder": [ @@ -3731,7 +4192,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", "httpMethod": "GET", "id": "compute.healthChecks.get", "parameterOrder": [ @@ -3765,7 +4226,7 @@ ] }, "insert": { - "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.healthChecks.insert", "parameterOrder": [ @@ -3798,7 +4259,7 @@ ] }, "list": { - "description": "Retrieves the list of HealthCheck resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of HealthCheck resources available to the specified project.", "httpMethod": "GET", "id": "compute.healthChecks.list", "parameterOrder": [ @@ -3806,25 +4267,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -3847,7 +4308,7 @@ ] }, "patch": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.healthChecks.patch", "parameterOrder": [ @@ -3888,7 +4349,7 @@ ] }, "update": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", "httpMethod": "PUT", "id": "compute.healthChecks.update", "parameterOrder": [ @@ -3933,7 +4394,7 @@ "httpHealthChecks": { "methods": { "delete": { - "description": "Deletes the specified HttpHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified HttpHealthCheck resource.", "httpMethod": "DELETE", "id": "compute.httpHealthChecks.delete", "parameterOrder": [ @@ -3971,7 +4432,7 @@ ] }, "get": { - "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", "httpMethod": "GET", "id": "compute.httpHealthChecks.get", "parameterOrder": [ @@ -4005,7 +4466,7 @@ ] }, "insert": { - "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.httpHealthChecks.insert", "parameterOrder": [ @@ -4038,7 +4499,7 @@ ] }, "list": { - "description": "Retrieves the list of HttpHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", "httpMethod": "GET", "id": "compute.httpHealthChecks.list", "parameterOrder": [ @@ -4046,25 +4507,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -4087,7 +4548,7 @@ ] }, "patch": { - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.httpHealthChecks.patch", "parameterOrder": [ @@ -4128,7 +4589,7 @@ ] }, "update": { - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", "httpMethod": "PUT", "id": "compute.httpHealthChecks.update", "parameterOrder": [ @@ -4173,7 +4634,7 @@ "httpsHealthChecks": { "methods": { "delete": { - "description": "Deletes the specified HttpsHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified HttpsHealthCheck resource.", "httpMethod": "DELETE", "id": "compute.httpsHealthChecks.delete", "parameterOrder": [ @@ -4211,7 +4672,7 @@ ] }, "get": { - "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified HttpsHealthCheck resource. 
Gets a list of available HTTPS health checks by making a list() request.", "httpMethod": "GET", "id": "compute.httpsHealthChecks.get", "parameterOrder": [ @@ -4245,7 +4706,7 @@ ] }, "insert": { - "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.httpsHealthChecks.insert", "parameterOrder": [ @@ -4278,7 +4739,7 @@ ] }, "list": { - "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", "httpMethod": "GET", "id": "compute.httpsHealthChecks.list", "parameterOrder": [ @@ -4286,25 +4747,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -4327,7 +4788,7 @@ ] }, "patch": { - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.httpsHealthChecks.patch", "parameterOrder": [ @@ -4368,7 +4829,7 @@ ] }, "update": { - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", "httpMethod": "PUT", "id": "compute.httpsHealthChecks.update", "parameterOrder": [ @@ -4413,7 +4874,7 @@ "images": { "methods": { "delete": { - "description": "Deletes the specified image. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified image.", "httpMethod": "DELETE", "id": "compute.images.delete", "parameterOrder": [ @@ -4451,7 +4912,7 @@ ] }, "deprecate": { - "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", "httpMethod": "POST", "id": "compute.images.deprecate", "parameterOrder": [ @@ -4492,7 +4953,7 @@ ] }, "get": { - "description": "Returns the specified image. Gets a list of available images by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified image. Gets a list of available images by making a list() request.", "httpMethod": "GET", "id": "compute.images.get", "parameterOrder": [ @@ -4526,7 +4987,7 @@ ] }, "getFromFamily": { - "description": "Returns the latest image that is part of an image family and is not deprecated. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the latest image that is part of an image family and is not deprecated.", "httpMethod": "GET", "id": "compute.images.getFromFamily", "parameterOrder": [ @@ -4560,7 +5021,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.images.getIamPolicy", "parameterOrder": [ @@ -4594,7 +5055,7 @@ ] }, "insert": { - "description": "Creates an image in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an image in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.images.insert", "parameterOrder": [ @@ -4635,7 +5096,7 @@ ] }, "list": { - "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "httpMethod": "GET", "id": "compute.images.list", "parameterOrder": [ @@ -4643,25 +5104,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -4684,7 +5145,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.images.setIamPolicy", "parameterOrder": [ @@ -4720,7 +5181,7 @@ ] }, "setLabels": { - "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", "id": "compute.images.setLabels", "parameterOrder": [ @@ -4756,7 +5217,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.images.testIamPermissions", "parameterOrder": [ @@ -4797,7 +5258,7 @@ "instanceGroupManagers": { "methods": { "abandonInstances": { - "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.abandonInstances", "parameterOrder": [ @@ -4844,7 +5305,7 @@ ] }, "aggregatedList": { - "description": "Retrieves the list of managed instance groups and groups them by zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of managed instance groups and groups them by zone.", "httpMethod": "GET", "id": "compute.instanceGroupManagers.aggregatedList", "parameterOrder": [ @@ -4852,25 +5313,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. 
For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -4892,8 +5358,97 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "applyUpdatesToInstances": { + "description": "Applies changes to selected instances on the managed instance group. 
This method can be used to apply new overrides and/or new versions.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.applyUpdatesToInstances", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "request": { + "$ref": "InstanceGroupManagersApplyUpdatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "createInstances": { + "description": "Creates instances with per-instance configs in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.createInstances", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", + "request": { + "$ref": "InstanceGroupManagersCreateInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { - "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", "httpMethod": "DELETE", "id": "compute.instanceGroupManagers.delete", "parameterOrder": [ @@ -4937,7 +5492,7 @@ ] }, "deleteInstances": { - "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.deleteInstances", "parameterOrder": [ @@ -4984,7 +5539,7 @@ ] }, "get": { - "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", "httpMethod": "GET", "id": "compute.instanceGroupManagers.get", "parameterOrder": [ @@ -5024,7 +5579,7 @@ ] }, "insert": { - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. 
This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.insert", "parameterOrder": [ @@ -5064,7 +5619,7 @@ ] }, "list": { - "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", "httpMethod": "GET", "id": "compute.instanceGroupManagers.list", "parameterOrder": [ @@ -5073,25 +5628,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -5119,8 +5674,71 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listErrors": { + "description": "Lists all errors thrown by actions on instances for a given managed instance group.", + "httpMethod": "GET", + "id": "compute.instanceGroupManagers.listErrors", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors", + "response": { + "$ref": "InstanceGroupManagersListErrorsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. 
Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -5130,7 +5748,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, @@ -5142,19 +5760,19 @@ }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, - "order_by": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -5183,7 +5801,7 @@ ] }, "patch": { - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.instanceGroupManagers.patch", "parameterOrder": [ @@ -5230,7 +5848,7 @@ ] }, "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. 
This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.recreateInstances", "parameterOrder": [ @@ -5277,7 +5895,7 @@ ] }, "resize": { - "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. 
+ For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.resize", "parameterOrder": [ @@ -5329,7 +5947,7 @@ ] }, "setInstanceTemplate": { - "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them. (== suppress_warning http-rest-shadowed ==)", + "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.setInstanceTemplate", "parameterOrder": [ @@ -5376,7 +5994,7 @@ ] }, "setTargetPools": { - "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group. (== suppress_warning http-rest-shadowed ==)", + "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.setTargetPools", "parameterOrder": [ @@ -5427,7 +6045,7 @@ "instanceGroups": { "methods": { "addInstances": { - "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", "httpMethod": "POST", "id": "compute.instanceGroups.addInstances", "parameterOrder": [ @@ -5474,7 +6092,7 @@ ] }, "aggregatedList": { - "description": "Retrieves the list of instance groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of instance groups and sorts them by zone.", "httpMethod": "GET", "id": "compute.instanceGroups.aggregatedList", "parameterOrder": [ @@ -5482,25 +6100,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -5523,7 +6146,7 @@ ] }, "delete": { - "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", "httpMethod": "DELETE", "id": "compute.instanceGroups.delete", "parameterOrder": [ @@ -5567,7 +6190,7 @@ ] }, "get": { - "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", "httpMethod": "GET", "id": "compute.instanceGroups.get", "parameterOrder": [ @@ -5607,7 +6230,7 @@ ] }, "insert": { - "description": "Creates an instance group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an instance group in the specified project using the parameters that are included in the request.", "httpMethod": "POST", "id": "compute.instanceGroups.insert", "parameterOrder": [ @@ -5647,7 +6270,7 @@ ] }, "list": { - "description": "Retrieves the list of instance groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of instance groups that are located in the specified project and zone.", "httpMethod": "GET", "id": "compute.instanceGroups.list", "parameterOrder": [ @@ -5656,25 +6279,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -5703,7 +6326,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists the instances in the specified instance group.", "httpMethod": "POST", "id": "compute.instanceGroups.listInstances", "parameterOrder": [ @@ -5713,7 +6336,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, @@ -5725,19 +6348,19 @@ }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -5769,7 +6392,7 @@ ] }, "removeInstances": { - "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", + "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", "httpMethod": "POST", "id": "compute.instanceGroups.removeInstances", "parameterOrder": [ @@ -5816,7 +6439,7 @@ ] }, "setNamedPorts": { - "description": "Sets the named ports for the specified instance group. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Sets the named ports for the specified instance group.", "httpMethod": "POST", "id": "compute.instanceGroups.setNamedPorts", "parameterOrder": [ @@ -5867,7 +6490,7 @@ "instanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", "httpMethod": "DELETE", "id": "compute.instanceTemplates.delete", "parameterOrder": [ @@ -5905,7 +6528,7 @@ ] }, "get": { - "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", "httpMethod": "GET", "id": "compute.instanceTemplates.get", "parameterOrder": [ @@ -5939,7 +6562,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.instanceTemplates.getIamPolicy", "parameterOrder": [ @@ -5973,7 +6596,7 @@ ] }, "insert": { - "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", "httpMethod": "POST", "id": "compute.instanceTemplates.insert", "parameterOrder": [ @@ -6006,7 +6629,7 @@ ] }, "list": { - "description": "Retrieves a list of instance templates that are contained within the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of instance templates that are contained within the specified project.", "httpMethod": "GET", "id": "compute.instanceTemplates.list", "parameterOrder": [ @@ -6014,25 +6637,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -6055,7 +6678,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.instanceTemplates.setIamPolicy", "parameterOrder": [ @@ -6091,7 +6714,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.instanceTemplates.testIamPermissions", "parameterOrder": [ @@ -6132,7 +6755,7 @@ "instances": { "methods": { "addAccessConfig": { - "description": "Adds an access config to an instance's network interface. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds an access config to an instance's network interface.", "httpMethod": "POST", "id": "compute.instances.addAccessConfig", "parameterOrder": [ @@ -6187,8 +6810,57 @@ "https://www.googleapis.com/auth/compute" ] }, + "addResourcePolicies": { + "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", + "httpMethod": "POST", + "id": "compute.instances.addResourcePolicies", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "The instance name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/addResourcePolicies", + "request": { + "$ref": "InstancesAddResourcePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "aggregatedList": { - "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", "httpMethod": "GET", "id": "compute.instances.aggregatedList", "parameterOrder": [ @@ -6196,25 +6868,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -6237,7 +6914,7 @@ ] }, "attachDisk": { - "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance. (== suppress_warning http-rest-shadowed ==)", + "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. 
For more information, read Adding a persistent disk to your instance.", "httpMethod": "POST", "id": "compute.instances.attachDisk", "parameterOrder": [ @@ -6291,7 +6968,7 @@ ] }, "delete": { - "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", "httpMethod": "DELETE", "id": "compute.instances.delete", "parameterOrder": [ @@ -6337,7 +7014,7 @@ ] }, "deleteAccessConfig": { - "description": "Deletes an access config from an instance's network interface. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes an access config from an instance's network interface.", "httpMethod": "POST", "id": "compute.instances.deleteAccessConfig", "parameterOrder": [ @@ -6397,7 +7074,7 @@ ] }, "detachDisk": { - "description": "Detaches a disk from an instance. (== suppress_warning http-rest-shadowed ==)", + "description": "Detaches a disk from an instance.", "httpMethod": "POST", "id": "compute.instances.detachDisk", "parameterOrder": [ @@ -6450,7 +7127,7 @@ ] }, "get": { - "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", "httpMethod": "GET", "id": "compute.instances.get", "parameterOrder": [ @@ -6492,7 +7169,7 @@ ] }, "getGuestAttributes": { - "description": "Returns the specified guest attributes entry. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified guest attributes entry.", "httpMethod": "GET", "id": "compute.instances.getGuestAttributes", "parameterOrder": [ @@ -6544,7 +7221,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.instances.getIamPolicy", "parameterOrder": [ @@ -6586,7 +7263,7 @@ ] }, "getSerialPortOutput": { - "description": "Returns the last 1 MB of serial port output from the specified instance. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the last 1 MB of serial port output from the specified instance.", "httpMethod": "GET", "id": "compute.instances.getSerialPortOutput", "parameterOrder": [ @@ -6643,7 +7320,7 @@ ] }, "getShieldedInstanceIdentity": { - "description": "Returns the Shielded Instance Identity of an instance (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the Shielded Instance Identity of an instance", "httpMethod": "GET", "id": "compute.instances.getShieldedInstanceIdentity", "parameterOrder": [ @@ -6685,7 +7362,7 @@ ] }, "insert": { - "description": "Creates an instance resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an instance resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.instances.insert", "parameterOrder": [ @@ -6731,7 +7408,7 @@ ] }, "list": { - "description": "Retrieves the list of instances contained within the specified zone. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of instances contained within the specified zone.", "httpMethod": "GET", "id": "compute.instances.list", "parameterOrder": [ @@ -6740,25 +7417,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -6788,7 +7465,7 @@ ] }, "listReferrers": { - "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", "httpMethod": "GET", "id": "compute.instances.listReferrers", "parameterOrder": [ @@ -6798,7 +7475,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, @@ -6811,19 +7488,19 @@ }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -6852,129 +7529,16 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "reset": { - "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance. 
(== suppress_warning http-rest-shadowed ==)", + "removeResourcePolicies": { + "description": "Removes resource policies from an instance.", "httpMethod": "POST", - "id": "compute.instances.reset", + "id": "compute.instances.removeResourcePolicies", "parameterOrder": [ "project", "zone", "instance" ], "parameters": { - "instance": { - "description": "Name of the instance scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/instances/{instance}/reset", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setDeletionProtection": { - "description": "Sets deletion protection on the instance. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.instances.setDeletionProtection", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "deletionProtection": { - "default": "true", - "description": "Whether the resource should be protected against deletion.", - "location": "query", - "type": "boolean" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setDiskAutoDelete": { - "description": "Sets the auto-delete flag for a disk attached to an instance. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.instances.setDiskAutoDelete", - "parameterOrder": [ - "project", - "zone", - "instance", - "autoDelete", - "deviceName" - ], - "parameters": { - "autoDelete": { - "description": "Whether to auto-delete the disk when the instance is deleted.", - "location": "query", - "required": true, - "type": "boolean" - }, - "deviceName": { - "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", - "location": "query", - "pattern": "\\w[\\w.-]{0,254}", - "required": true, - "type": "string" - }, "instance": { "description": "The instance name for this request.", "location": "path", @@ -7002,99 +7566,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.instances.setIamPolicy", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", - "request": { - "$ref": "ZoneSetPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setLabels": { - "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation. 
(== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.instances.setLabels", - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "parameters": { - "instance": { - "description": "Name of the instance scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + "path": "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", "request": { - "$ref": "InstancesSetLabelsRequest" + "$ref": "InstancesRemoveResourcePoliciesRequest" }, "response": { "$ref": "Operation" @@ -7104,10 +7578,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setMachineResources": { - "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request. (== suppress_warning http-rest-shadowed ==)", + "reset": { + "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", "httpMethod": "POST", - "id": "compute.instances.setMachineResources", + "id": "compute.instances.reset", "parameterOrder": [ "project", "zone", @@ -7141,10 +7615,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", - "request": { - "$ref": "InstancesSetMachineResourcesRequest" - }, + "path": "{project}/zones/{zone}/instances/{instance}/reset", "response": { "$ref": "Operation" }, @@ -7153,22 +7624,21 @@ "https://www.googleapis.com/auth/compute" ] }, - "setMachineType": { - "description": "Changes the machine type for a stopped instance to the machine type specified in the request. 
(== suppress_warning http-rest-shadowed ==)", + "setDeletionProtection": { + "description": "Sets deletion protection on the instance.", "httpMethod": "POST", - "id": "compute.instances.setMachineType", + "id": "compute.instances.setDeletionProtection", "parameterOrder": [ "project", "zone", - "instance" + "resource" ], "parameters": { - "instance": { - "description": "Name of the instance scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" + "deletionProtection": { + "default": "true", + "description": "Whether the resource should be protected against deletion.", + "location": "query", + "type": "boolean" }, "project": { "description": "Project ID for this request.", @@ -7182,6 +7652,13 @@ "location": "query", "type": "string" }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -7190,10 +7667,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - "request": { - "$ref": "InstancesSetMachineTypeRequest" - }, + "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", "response": { "$ref": "Operation" }, @@ -7202,18 +7676,33 @@ "https://www.googleapis.com/auth/compute" ] }, - "setMetadata": { - "description": "Sets metadata for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "setDiskAutoDelete": { + "description": "Sets the auto-delete flag for a disk attached to an instance.", "httpMethod": "POST", - "id": "compute.instances.setMetadata", + "id": "compute.instances.setDiskAutoDelete", "parameterOrder": [ "project", "zone", - "instance" + "instance", + "autoDelete", + "deviceName" ], "parameters": { + "autoDelete": { + "description": "Whether to auto-delete the disk when the instance is deleted.", + "location": "query", + "required": true, + "type": "boolean" + }, + "deviceName": { + "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", + "location": "query", + "pattern": "\\w[\\w.-]{0,254}", + "required": true, + "type": "string" + }, "instance": { - "description": "Name of the instance scoping this request.", + "description": "The instance name for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7239,22 +7728,63 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "httpMethod": "POST", + "id": "compute.instances.setIamPolicy", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", "request": { - "$ref": "Metadata" + "$ref": "ZoneSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setMinCpuPlatform": { - "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform. (== suppress_warning http-rest-shadowed ==)", + "setLabels": { + "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", - "id": "compute.instances.setMinCpuPlatform", + "id": "compute.instances.setLabels", "parameterOrder": [ "project", "zone", @@ -7288,9 +7818,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + "path": "{project}/zones/{zone}/instances/{instance}/setLabels", "request": { - "$ref": "InstancesSetMinCpuPlatformRequest" + "$ref": "InstancesSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -7300,10 +7830,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setScheduling": { - "description": "Sets an instance's scheduling options. (== suppress_warning http-rest-shadowed ==)", + "setMachineResources": { + "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", "httpMethod": "POST", - "id": "compute.instances.setScheduling", + "id": "compute.instances.setMachineResources", "parameterOrder": [ "project", "zone", @@ -7311,7 +7841,7 @@ ], "parameters": { "instance": { - "description": "Instance name for this request.", + "description": "Name of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7337,9 +7867,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", "request": { - "$ref": "Scheduling" + "$ref": "InstancesSetMachineResourcesRequest" }, "response": { "$ref": "Operation" @@ -7349,10 +7879,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setServiceAccount": { - "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance. 
(== suppress_warning http-rest-shadowed ==)", + "setMachineType": { + "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", "httpMethod": "POST", - "id": "compute.instances.setServiceAccount", + "id": "compute.instances.setMachineType", "parameterOrder": [ "project", "zone", @@ -7360,7 +7890,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to start.", + "description": "Name of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7386,9 +7916,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", "request": { - "$ref": "InstancesSetServiceAccountRequest" + "$ref": "InstancesSetMachineTypeRequest" }, "response": { "$ref": "Operation" @@ -7398,10 +7928,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setShieldedInstanceIntegrityPolicy": { - "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "PATCH", - "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + "setMetadata": { + "description": "Sets metadata for the specified instance to the data included in the request.", + "httpMethod": "POST", + "id": "compute.instances.setMetadata", "parameterOrder": [ "project", "zone", @@ -7409,7 +7939,7 @@ ], "parameters": { "instance": { - "description": "Name or id of the instance scoping this request.", + "description": "Name of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7435,9 +7965,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", "request": { - "$ref": "ShieldedInstanceIntegrityPolicy" + "$ref": "Metadata" }, "response": { "$ref": "Operation" @@ -7447,10 +7977,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setTags": { - "description": "Sets network tags for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "setMinCpuPlatform": { + "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", "httpMethod": "POST", - "id": "compute.instances.setTags", + "id": "compute.instances.setMinCpuPlatform", "parameterOrder": [ "project", "zone", @@ -7484,9 +8014,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", "request": { - "$ref": "Tags" + "$ref": "InstancesSetMinCpuPlatformRequest" }, "response": { "$ref": "Operation" @@ -7496,10 +8026,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "simulateMaintenanceEvent": { - "description": "Simulates a maintenance event on the instance. 
(== suppress_warning http-rest-shadowed ==)", + "setScheduling": { + "description": "Sets an instance's scheduling options.", "httpMethod": "POST", - "id": "compute.instances.simulateMaintenanceEvent", + "id": "compute.instances.setScheduling", "parameterOrder": [ "project", "zone", @@ -7507,7 +8037,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance scoping this request.", + "description": "Instance name for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7520,6 +8050,11 @@ "required": true, "type": "string" }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -7528,7 +8063,10 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + "request": { + "$ref": "Scheduling" + }, "response": { "$ref": "Operation" }, @@ -7537,10 +8075,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "start": { - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", + "setServiceAccount": { + "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", "httpMethod": "POST", - "id": "compute.instances.start", + "id": "compute.instances.setServiceAccount", "parameterOrder": [ "project", "zone", @@ -7574,7 +8112,10 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/start", + "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "request": { + "$ref": "InstancesSetServiceAccountRequest" + }, "response": { "$ref": "Operation" }, @@ -7583,10 +8124,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "startWithEncryptionKey": { - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.instances.startWithEncryptionKey", + "setShieldedInstanceIntegrityPolicy": { + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.setShieldedInstanceIntegrityPolicy", "parameterOrder": [ "project", "zone", @@ -7594,7 +8135,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to start.", + "description": "Name or id of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7620,9 +8161,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", "request": { - "$ref": "InstancesStartWithEncryptionKeyRequest" + "$ref": "ShieldedInstanceIntegrityPolicy" }, "response": { "$ref": "Operation" @@ -7632,10 +8173,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "stop": { - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance. (== suppress_warning http-rest-shadowed ==)", + "setTags": { + "description": "Sets network tags for the specified instance to the data included in the request.", "httpMethod": "POST", - "id": "compute.instances.stop", + "id": "compute.instances.setTags", "parameterOrder": [ "project", "zone", @@ -7643,7 +8184,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to stop.", + "description": "Name of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -7669,7 +8210,10 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/stop", + "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "request": { + "$ref": "Tags" + }, "response": { "$ref": "Operation" }, @@ -7678,27 +8222,27 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + "simulateMaintenanceEvent": { + "description": "Simulates a maintenance event on the instance.", "httpMethod": "POST", - "id": "compute.instances.testIamPermissions", + "id": "compute.instances.simulateMaintenanceEvent", "parameterOrder": [ "project", "zone", - "resource" + "instance" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "instance": { + "description": "Name of the instance scoping this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, @@ -7710,43 +8254,32 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "updateAccessConfig": { - "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "start": { + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", "httpMethod": "POST", - "id": "compute.instances.updateAccessConfig", + "id": "compute.instances.start", "parameterOrder": [ "project", "zone", - "instance", - "networkInterface" + "instance" ], "parameters": { "instance": { - "description": "The instance name for this request.", + "description": "Name of the instance resource to start.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "networkInterface": { - "description": "The name of the network interface where the access config is attached.", - "location": "query", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -7767,10 +8300,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", - "request": { - "$ref": "AccessConfig" - }, + "path": "{project}/zones/{zone}/instances/{instance}/start", "response": { "$ref": "Operation" }, @@ -7779,10 +8309,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "updateDisplayDevice": { - "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", - "httpMethod": "PATCH", - "id": "compute.instances.updateDisplayDevice", + "startWithEncryptionKey": { + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + "httpMethod": "POST", + "id": "compute.instances.startWithEncryptionKey", "parameterOrder": [ "project", "zone", @@ -7790,9 +8320,288 @@ ], "parameters": { "instance": { - "description": "Name of the instance scoping this request.", + "description": "Name of the instance resource to start.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "request": { + "$ref": "InstancesStartWithEncryptionKeyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stop": { + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "httpMethod": "POST", + "id": "compute.instances.stop", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance resource to stop.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/stop", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.instances.testIamPermissions", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "description": "Updates an instance only if the necessary resources are available. This method can update only a specific set of instance properties. See Updating a running instance for a list of updatable instance properties.", + "httpMethod": "PUT", + "id": "compute.instances.update", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "minimalAction": { + "description": "Specifies the action to take when updating an instance even if the updated properties do not require it. If not specified, then Compute Engine acts based on the minimum action that the updated properties require.", + "enum": [ + "INVALID", + "NO_EFFECT", + "REFRESH", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "Specifies the most disruptive action that can be taken on the instance as part of the update. 
Compute Engine returns an error if the instance properties require a more disruptive action as part of the instance update. Valid options from lowest to highest are NO_EFFECT, REFRESH, and RESTART.", + "enum": [ + "INVALID", + "NO_EFFECT", + "REFRESH", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}", + "request": { + "$ref": "Instance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "updateAccessConfig": { + "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "POST", + "id": "compute.instances.updateAccessConfig", + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "parameters": { + "instance": { + "description": "The instance name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "networkInterface": { + "description": "The name of the network interface where the access config is attached.", + "location": "query", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + "request": { + "$ref": "AccessConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "updateDisplayDevice": { + "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.updateDisplayDevice", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -7829,7 +8638,7 @@ ] }, "updateNetworkInterface": { - "description": "Updates an instance's network interface. This method follows PATCH semantics. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates an instance's network interface. This method follows PATCH semantics.", "httpMethod": "PATCH", "id": "compute.instances.updateNetworkInterface", "parameterOrder": [ @@ -7885,7 +8694,7 @@ ] }, "updateShieldedInstanceConfig": { - "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.instances.updateShieldedInstanceConfig", "parameterOrder": [ @@ -7938,7 +8747,7 @@ "interconnectAttachments": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of interconnect attachments. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of interconnect attachments.", "httpMethod": "GET", "id": "compute.interconnectAttachments.aggregatedList", "parameterOrder": [ @@ -7946,25 +8755,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -7987,7 +8801,7 @@ ] }, "delete": { - "description": "Deletes the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified interconnect attachment.", "httpMethod": "DELETE", "id": "compute.interconnectAttachments.delete", "parameterOrder": [ @@ -8033,7 +8847,7 @@ ] }, "get": { - "description": "Returns the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified interconnect attachment.", "httpMethod": "GET", "id": "compute.interconnectAttachments.get", "parameterOrder": [ @@ -8075,7 +8889,7 @@ ] }, "insert": { - "description": "Creates an InterconnectAttachment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.interconnectAttachments.insert", "parameterOrder": [ @@ -8101,6 +8915,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/interconnectAttachments", @@ -8116,7 +8935,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect attachments contained within the specified region. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of interconnect attachments contained within the specified region.", "httpMethod": "GET", "id": "compute.interconnectAttachments.list", "parameterOrder": [ @@ -8125,25 +8944,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -8173,7 +8992,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.interconnectAttachments.patch", "parameterOrder": [ @@ -8226,7 +9045,7 @@ "interconnectLocations": { "methods": { "get": { - "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", "httpMethod": "GET", "id": "compute.interconnectLocations.get", "parameterOrder": [ @@ -8260,7 +9079,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect locations available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of interconnect locations available to the specified project.", "httpMethod": "GET", "id": "compute.interconnectLocations.list", "parameterOrder": [ @@ -8268,25 +9087,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -8313,7 +9132,7 @@ "interconnects": { "methods": { "delete": { - "description": "Deletes the specified interconnect. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified interconnect.", "httpMethod": "DELETE", "id": "compute.interconnects.delete", "parameterOrder": [ @@ -8351,7 +9170,7 @@ ] }, "get": { - "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", "httpMethod": "GET", "id": "compute.interconnects.get", "parameterOrder": [ @@ -8385,7 +9204,7 @@ ] }, "getDiagnostics": { - "description": "Returns the interconnectDiagnostics for the specified interconnect. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the interconnectDiagnostics for the specified interconnect.", "httpMethod": "GET", "id": "compute.interconnects.getDiagnostics", "parameterOrder": [ @@ -8419,7 +9238,7 @@ ] }, "insert": { - "description": "Creates a Interconnect in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a Interconnect in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.interconnects.insert", "parameterOrder": [ @@ -8452,7 +9271,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of interconnect available to the specified project.", "httpMethod": "GET", "id": "compute.interconnects.list", "parameterOrder": [ @@ -8460,25 +9279,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -8501,7 +9320,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.interconnects.patch", "parameterOrder": [ @@ -8546,7 +9365,7 @@ "licenseCodes": { "methods": { "get": { - "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. (== suppress_warning http-rest-shadowed ==)", + "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "GET", "id": "compute.licenseCodes.get", "parameterOrder": [ @@ -8580,7 +9399,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "POST", "id": "compute.licenseCodes.testIamPermissions", "parameterOrder": [ @@ -8621,7 +9440,7 @@ "licenses": { "methods": { "delete": { - "description": "Deletes the specified license. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified license. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "DELETE", "id": "compute.licenses.delete", "parameterOrder": [ @@ -8659,7 +9478,7 @@ ] }, "get": { - "description": "Returns the specified License resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified License resource. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "GET", "id": "compute.licenses.get", "parameterOrder": [ @@ -8693,7 +9512,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "GET", "id": "compute.licenses.getIamPolicy", "parameterOrder": [ @@ -8727,7 +9546,7 @@ ] }, "insert": { - "description": "Create a License resource in the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Create a License resource in the specified project. 
Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "POST", "id": "compute.licenses.insert", "parameterOrder": [ @@ -8763,7 +9582,7 @@ ] }, "list": { - "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "GET", "id": "compute.licenses.list", "parameterOrder": [ @@ -8771,25 +9590,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -8812,7 +9631,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "POST", "id": "compute.licenses.setIamPolicy", "parameterOrder": [ @@ -8848,7 +9667,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource. 
Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", "httpMethod": "POST", "id": "compute.licenses.testIamPermissions", "parameterOrder": [ @@ -8889,7 +9708,7 @@ "machineTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of machine types. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of machine types.", "httpMethod": "GET", "id": "compute.machineTypes.aggregatedList", "parameterOrder": [ @@ -8897,25 +9716,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -8938,7 +9762,7 @@ ] }, "get": { - "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", "httpMethod": "GET", "id": "compute.machineTypes.get", "parameterOrder": [ @@ -8980,7 +9804,7 @@ ] }, "list": { - "description": "Retrieves a list of machine types available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of machine types available to the specified project.", "httpMethod": "GET", "id": "compute.machineTypes.list", "parameterOrder": [ @@ -8989,25 +9813,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -9041,7 +9865,7 @@ "networkEndpointGroups": { "methods": { "aggregatedList": { - "description": "Retrieves the list of network endpoint groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of network endpoint groups and sorts them by zone.", "httpMethod": "GET", "id": "compute.networkEndpointGroups.aggregatedList", "parameterOrder": [ @@ -9049,25 +9873,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -9090,7 +9919,7 @@ ] }, "attachNetworkEndpoints": { - "description": "Attach a list of network endpoints to the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", + "description": "Attach a list of network endpoints to the specified network endpoint group.", "httpMethod": "POST", "id": "compute.networkEndpointGroups.attachNetworkEndpoints", "parameterOrder": [ @@ -9137,7 +9966,7 @@ ] }, "delete": { - "description": "Deletes the specified network endpoint group. 
The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", "httpMethod": "DELETE", "id": "compute.networkEndpointGroups.delete", "parameterOrder": [ @@ -9181,7 +10010,7 @@ ] }, "detachNetworkEndpoints": { - "description": "Detach a list of network endpoints from the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", + "description": "Detach a list of network endpoints from the specified network endpoint group.", "httpMethod": "POST", "id": "compute.networkEndpointGroups.detachNetworkEndpoints", "parameterOrder": [ @@ -9228,7 +10057,7 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", "httpMethod": "GET", "id": "compute.networkEndpointGroups.get", "parameterOrder": [ @@ -9268,7 +10097,7 @@ ] }, "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", "httpMethod": "POST", "id": "compute.networkEndpointGroups.insert", "parameterOrder": [ @@ -9308,7 +10137,7 @@ ] }, "list": { - "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", "httpMethod": "GET", "id": "compute.networkEndpointGroups.list", "parameterOrder": [ @@ -9317,25 +10146,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -9364,7 +10193,7 @@ ] }, "listNetworkEndpoints": { - "description": "Lists the network endpoints in the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists the network endpoints in the specified network endpoint group.", "httpMethod": "POST", "id": "compute.networkEndpointGroups.listNetworkEndpoints", "parameterOrder": [ @@ -9374,13 +10203,13 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", @@ -9393,12 +10222,12 @@ "type": "string" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -9430,7 +10259,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.networkEndpointGroups.testIamPermissions", "parameterOrder": [ @@ -9479,7 +10308,7 @@ "networks": { "methods": { "addPeering": { - "description": "Adds a peering to the specified network. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds a peering to the specified network.", "httpMethod": "POST", "id": "compute.networks.addPeering", "parameterOrder": [ @@ -9520,7 +10349,7 @@ ] }, "delete": { - "description": "Deletes the specified network. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified network.", "httpMethod": "DELETE", "id": "compute.networks.delete", "parameterOrder": [ @@ -9558,7 +10387,7 @@ ] }, "get": { - "description": "Returns the specified network. Gets a list of available networks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified network. Gets a list of available networks by making a list() request.", "httpMethod": "GET", "id": "compute.networks.get", "parameterOrder": [ @@ -9592,7 +10421,7 @@ ] }, "insert": { - "description": "Creates a network in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a network in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.networks.insert", "parameterOrder": [ @@ -9625,7 +10454,7 @@ ] }, "list": { - "description": "Retrieves the list of networks available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of networks available to the specified project.", "httpMethod": "GET", "id": "compute.networks.list", "parameterOrder": [ @@ -9633,25 +10462,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -9673,8 +10502,88 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listPeeringRoutes": { + "description": "Lists the peering routes exchanged over peering connection.", + "httpMethod": "GET", + "id": "compute.networks.listPeeringRoutes", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "direction": { + "description": "The direction of the exchanged routes.", + "enum": [ + "INCOMING", + "OUTGOING" + ], + "enumDescriptions": [ + "", + "" + ], + "location": "query", + "type": "string" + }, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "network": { + "description": "Name of the network for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "peeringName": { + "description": "The response will show routes exchanged over the given peering connection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networks/{network}/listPeeringRoutes", + "response": { + "$ref": "ExchangedPeeringRoutesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "patch": { - "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", "httpMethod": "PATCH", "id": "compute.networks.patch", "parameterOrder": [ @@ -9715,7 +10624,7 @@ ] }, "removePeering": { - "description": "Removes a peering from the specified network. (== suppress_warning http-rest-shadowed ==)", + "description": "Removes a peering from the specified network.", "httpMethod": "POST", "id": "compute.networks.removePeering", "parameterOrder": [ @@ -9756,7 +10665,7 @@ ] }, "switchToCustomMode": { - "description": "Switches the network mode from auto subnet mode to custom subnet mode. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Switches the network mode from auto subnet mode to custom subnet mode.", "httpMethod": "POST", "id": "compute.networks.switchToCustomMode", "parameterOrder": [ @@ -9794,7 +10703,7 @@ ] }, "updatePeering": { - "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes", "httpMethod": "PATCH", "id": "compute.networks.updatePeering", "parameterOrder": [ @@ -9839,7 +10748,7 @@ "nodeGroups": { "methods": { "addNodes": { - "description": "Adds specified number of nodes to the node group. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds specified number of nodes to the node group.", "httpMethod": "POST", "id": "compute.nodeGroups.addNodes", "parameterOrder": [ @@ -9888,7 +10797,7 @@ ] }, "aggregatedList": { - "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", "httpMethod": "GET", "id": "compute.nodeGroups.aggregatedList", "parameterOrder": [ @@ -9896,25 +10805,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -9937,7 +10851,7 @@ ] }, "delete": { - "description": "Deletes the specified NodeGroup resource. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified NodeGroup resource.", "httpMethod": "DELETE", "id": "compute.nodeGroups.delete", "parameterOrder": [ @@ -9983,7 +10897,7 @@ ] }, "deleteNodes": { - "description": "Deletes specified nodes from the node group. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes specified nodes from the node group.", "httpMethod": "POST", "id": "compute.nodeGroups.deleteNodes", "parameterOrder": [ @@ -10032,7 +10946,7 @@ ] }, "get": { - "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", "httpMethod": "GET", "id": "compute.nodeGroups.get", "parameterOrder": [ @@ -10074,7 +10988,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.nodeGroups.getIamPolicy", "parameterOrder": [ @@ -10116,7 +11030,7 @@ ] }, "insert": { - "description": "Creates a NodeGroup resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.nodeGroups.insert", "parameterOrder": [ @@ -10165,7 +11079,7 @@ ] }, "list": { - "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", "httpMethod": "GET", "id": "compute.nodeGroups.list", "parameterOrder": [ @@ -10174,25 +11088,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -10222,7 +11136,7 @@ ] }, "listNodes": { - "description": "Lists nodes in the node group. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists nodes in the node group.", "httpMethod": "POST", "id": "compute.nodeGroups.listNodes", "parameterOrder": [ @@ -10232,13 +11146,13 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", @@ -10252,12 +11166,12 @@ "type": "string" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -10286,54 +11200,10 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.nodeGroups.setIamPolicy", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", - "request": { - "$ref": "ZoneSetPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setNodeTemplate": { - "description": "Updates the node template of the node group. 
(== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.nodeGroups.setNodeTemplate", + "patch": { + "description": "Patch the node group.", + "httpMethod": "PATCH", + "id": "compute.nodeGroups.patch", "parameterOrder": [ "project", "zone", @@ -10367,9 +11237,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", "request": { - "$ref": "NodeGroupsSetNodeTemplateRequest" + "$ref": "NodeGroup" }, "response": { "$ref": "Operation" @@ -10379,10 +11249,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", - "id": "compute.nodeGroups.testIamPermissions", + "id": "compute.nodeGroups.setIamPolicy", "parameterOrder": [ "project", "zone", @@ -10411,84 +11281,30 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", + "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "nodeTemplates": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of node templates. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.nodeTemplates.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } + "$ref": "ZoneSetPolicyRequest" }, - "path": "{project}/aggregated/nodeTemplates", "response": { - "$ref": "NodeTemplateAggregatedList" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "delete": { - "description": "Deletes the specified NodeTemplate resource. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "DELETE", - "id": "compute.nodeTemplates.delete", + "setNodeTemplate": { + "description": "Updates the node template of the node group.", + "httpMethod": "POST", + "id": "compute.nodeGroups.setNodeTemplate", "parameterOrder": [ "project", - "region", - "nodeTemplate" + "zone", + "nodeGroup" ], "parameters": { - "nodeTemplate": { - "description": "Name of the NodeTemplate resource to delete.", + "nodeGroup": { + "description": "Name of the NodeGroup resource to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -10501,77 +11317,38 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - } - }, - "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified node template. Gets a list of available node templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.nodeTemplates.get", - "parameterOrder": [ - "project", - "region", - "nodeTemplate" - ], - "parameters": { - "nodeTemplate": { - "description": "Name of the node template to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + "request": { + "$ref": "NodeGroupsSetNodeTemplateRequest" + }, "response": { - "$ref": "NodeTemplate" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.nodeTemplates.getIamPolicy", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.nodeGroups.testIamPermissions", "parameterOrder": [ "project", - "region", + "zone", "resource" ], "parameters": { @@ -10582,101 +11359,292 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "resource": { "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - } - }, - "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a NodeTemplate resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.nodeTemplates.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "{project}/regions/{region}/nodeTemplates", + "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", "request": { - "$ref": "NodeTemplate" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "list": { - "description": "Retrieves a list of node templates available to the specified project. (== suppress_warning http-rest-shadowed ==)", + } + } + }, + "nodeTemplates": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of node templates.", "httpMethod": "GET", - "id": "compute.nodeTemplates.list", + "id": "compute.nodeTemplates.aggregatedList", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/nodeTemplates", + "response": { + "$ref": "NodeTemplateAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified NodeTemplate resource.", + "httpMethod": "DELETE", + "id": "compute.nodeTemplates.delete", + "parameterOrder": [ + "project", + "region", + "nodeTemplate" + ], + "parameters": { + "nodeTemplate": { + "description": "Name of the NodeTemplate resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified node template. 
Gets a list of available node templates by making a list() request.", + "httpMethod": "GET", + "id": "compute.nodeTemplates.get", + "parameterOrder": [ + "project", + "region", + "nodeTemplate" + ], + "parameters": { + "nodeTemplate": { + "description": "Name of the node template to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + "response": { + "$ref": "NodeTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.nodeTemplates.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.nodeTemplates.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/nodeTemplates", + "request": { + "$ref": "NodeTemplate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of node templates available to the specified project.", + "httpMethod": "GET", + "id": "compute.nodeTemplates.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -10706,7 +11674,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.nodeTemplates.setIamPolicy", "parameterOrder": [ @@ -10750,7 +11718,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.nodeTemplates.testIamPermissions", "parameterOrder": [ @@ -10781,7 +11749,502 @@ "type": "string" } }, - "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "nodeTypes": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of node types.", + "httpMethod": "GET", + "id": "compute.nodeTypes.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/nodeTypes", + "response": { + "$ref": "NodeTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "description": "Returns the specified node type. 
Gets a list of available node types by making a list() request.", + "httpMethod": "GET", + "id": "compute.nodeTypes.get", + "parameterOrder": [ + "project", + "zone", + "nodeType" + ], + "parameters": { + "nodeType": { + "description": "Name of the node type to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", + "response": { + "$ref": "NodeType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of node types available to the specified project.", + "httpMethod": "GET", + "id": "compute.nodeTypes.list", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/nodeTypes", + "response": { + "$ref": "NodeTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "packetMirrorings": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of packetMirrorings.", + "httpMethod": "GET", + "id": "compute.packetMirrorings.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/packetMirrorings", + "response": { + "$ref": "PacketMirroringAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified PacketMirroring resource.", + "httpMethod": "DELETE", + "id": "compute.packetMirrorings.delete", + "parameterOrder": [ + "project", + "region", + "packetMirroring" + ], + "parameters": { + "packetMirroring": { + "description": "Name of the PacketMirroring resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified PacketMirroring resource.", + "httpMethod": "GET", + "id": "compute.packetMirrorings.get", + "parameterOrder": [ + "project", + "region", + "packetMirroring" + ], + "parameters": { + "packetMirroring": { + "description": "Name of the PacketMirroring resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "response": { + "$ref": "PacketMirroring" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a PacketMirroring resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.packetMirrorings.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/packetMirrorings", + "request": { + "$ref": "PacketMirroring" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of PacketMirroring resources available to the specified project and region.", + "httpMethod": "GET", + "id": "compute.packetMirrorings.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/packetMirrorings", + "response": { + "$ref": "PacketMirroringList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified PacketMirroring resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.packetMirrorings.patch", + "parameterOrder": [ + "project", + "region", + "packetMirroring" + ], + "parameters": { + "packetMirroring": { + "description": "Name of the PacketMirroring resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "request": { + "$ref": "PacketMirroring" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.packetMirrorings.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -10796,162 +12259,10 @@ } } }, - "nodeTypes": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of node types. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.nodeTypes.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/aggregated/nodeTypes", - "response": { - "$ref": "NodeTypeAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "get": { - "description": "Returns the specified node type. Gets a list of available node types by making a list() request. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.nodeTypes.get", - "parameterOrder": [ - "project", - "zone", - "nodeType" - ], - "parameters": { - "nodeType": { - "description": "Name of the node type to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", - "response": { - "$ref": "NodeType" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "description": "Retrieves a list of node types available to the specified project. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.nodeTypes.list", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/nodeTypes", - "response": { - "$ref": "NodeTypeList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, "projects": { "methods": { "disableXpnHost": { - "description": "Disable this project as a shared VPC host project. (== suppress_warning http-rest-shadowed ==)", + "description": "Disable this project as a shared VPC host project.", "httpMethod": "POST", "id": "compute.projects.disableXpnHost", "parameterOrder": [ @@ -10981,7 +12292,7 @@ ] }, "disableXpnResource": { - "description": "Disable a service resource (also known as service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", + "description": "Disable a service resource (also known as service project) associated with this host project.", "httpMethod": "POST", "id": "compute.projects.disableXpnResource", "parameterOrder": [ @@ -11014,7 +12325,7 @@ ] }, "enableXpnHost": { - "description": "Enable this project as a shared VPC host project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Enable this project as a shared VPC host project.", "httpMethod": "POST", "id": "compute.projects.enableXpnHost", "parameterOrder": [ @@ -11044,7 +12355,7 @@ ] }, "enableXpnResource": { - "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project. (== suppress_warning http-rest-shadowed ==)", + "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", "httpMethod": "POST", "id": "compute.projects.enableXpnResource", "parameterOrder": [ @@ -11077,7 +12388,7 @@ ] }, "get": { - "description": "Returns the specified Project resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Project resource.", "httpMethod": "GET", "id": "compute.projects.get", "parameterOrder": [ @@ -11103,7 +12414,7 @@ ] }, "getXpnHost": { - "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", "httpMethod": "GET", "id": "compute.projects.getXpnHost", "parameterOrder": [ @@ -11128,7 +12439,7 @@ ] }, "getXpnResources": { - "description": "Gets service resources (a.k.a service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets service resources (a.k.a service project) associated with this host project.", "httpMethod": "GET", "id": "compute.projects.getXpnResources", "parameterOrder": [ @@ -11136,25 +12447,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, - "order_by": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -11176,7 +12487,7 @@ ] }, "listXpnHosts": { - "description": "Lists all shared VPC host projects visible to the user in an organization. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists all shared VPC host projects visible to the user in an organization.", "httpMethod": "POST", "id": "compute.projects.listXpnHosts", "parameterOrder": [ @@ -11184,25 +12495,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, - "order_by": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -11227,7 +12538,7 @@ ] }, "moveDisk": { - "description": "Moves a persistent disk from one zone to another. (== suppress_warning http-rest-shadowed ==)", + "description": "Moves a persistent disk from one zone to another.", "httpMethod": "POST", "id": "compute.projects.moveDisk", "parameterOrder": [ @@ -11260,7 +12571,7 @@ ] }, "moveInstance": { - "description": "Moves an instance and its attached persistent disks from one zone to another. (== suppress_warning http-rest-shadowed ==)", + "description": "Moves an instance and its attached persistent disks from one zone to another.", "httpMethod": "POST", "id": "compute.projects.moveInstance", "parameterOrder": [ @@ -11293,7 +12604,7 @@ ] }, "setCommonInstanceMetadata": { - "description": "Sets metadata common to all instances within the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.projects.setCommonInstanceMetadata", "parameterOrder": [ @@ -11326,7 +12637,7 @@ ] }, "setDefaultNetworkTier": { - "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", "httpMethod": "POST", "id": "compute.projects.setDefaultNetworkTier", "parameterOrder": [ @@ -11359,7 +12670,7 @@ ] }, "setUsageExportBucket": { - "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled. (== suppress_warning http-rest-shadowed ==)", + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", "httpMethod": "POST", "id": "compute.projects.setUsageExportBucket", "parameterOrder": [ @@ -11399,7 +12710,7 @@ "regionAutoscalers": { "methods": { "delete": { - "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified autoscaler.", "httpMethod": "DELETE", "id": "compute.regionAutoscalers.delete", "parameterOrder": [ @@ -11445,7 +12756,7 @@ ] }, "get": { - "description": "Returns the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified autoscaler.", "httpMethod": "GET", "id": "compute.regionAutoscalers.get", "parameterOrder": [ @@ -11487,7 +12798,7 @@ ] }, "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates an autoscaler in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.regionAutoscalers.insert", "parameterOrder": [ @@ -11528,7 +12839,7 @@ ] }, "list": { - "description": "Retrieves a list of autoscalers contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of autoscalers contained within the specified region.", "httpMethod": "GET", "id": "compute.regionAutoscalers.list", "parameterOrder": [ @@ -11537,25 +12848,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -11585,7 +12896,7 @@ ] }, "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionAutoscalers.patch", "parameterOrder": [ @@ -11632,7 +12943,7 @@ ] }, "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Updates an autoscaler in the specified project using the data included in the request.", "httpMethod": "PUT", "id": "compute.regionAutoscalers.update", "parameterOrder": [ @@ -11683,7 +12994,7 @@ "regionBackendServices": { "methods": { "delete": { - "description": "Deletes the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified regional BackendService resource.", "httpMethod": "DELETE", "id": "compute.regionBackendServices.delete", "parameterOrder": [ @@ -11729,7 +13040,7 @@ ] }, "get": { - "description": "Returns the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified regional BackendService resource.", "httpMethod": "GET", "id": "compute.regionBackendServices.get", "parameterOrder": [ @@ -11771,7 +13082,7 @@ ] }, "getHealth": { - "description": "Gets the most recent health check results for this regional BackendService. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the most recent health check results for this regional BackendService.", "httpMethod": "POST", "id": "compute.regionBackendServices.getHealth", "parameterOrder": [ @@ -11815,7 +13126,7 @@ ] }, "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", "httpMethod": "POST", "id": "compute.regionBackendServices.insert", "parameterOrder": [ @@ -11856,7 +13167,7 @@ ] }, "list": { - "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", "httpMethod": "GET", "id": "compute.regionBackendServices.list", "parameterOrder": [ @@ -11865,25 +13176,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -11913,7 +13224,7 @@ ] }, "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionBackendServices.patch", "parameterOrder": [ @@ -11962,7 +13273,7 @@ ] }, "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", "httpMethod": "PUT", "id": "compute.regionBackendServices.update", "parameterOrder": [ @@ -12015,7 +13326,7 @@ "regionCommitments": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of commitments. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of commitments.", "httpMethod": "GET", "id": "compute.regionCommitments.aggregatedList", "parameterOrder": [ @@ -12023,25 +13334,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -12064,7 +13380,7 @@ ] }, "get": { - "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", "httpMethod": "GET", "id": "compute.regionCommitments.get", "parameterOrder": [ @@ -12106,7 +13422,7 @@ ] }, "insert": { - "description": "Creates a commitment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a commitment in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.regionCommitments.insert", "parameterOrder": [ @@ -12147,7 +13463,7 @@ ] }, "list": { - "description": "Retrieves a list of commitments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of commitments contained within the specified region.", "httpMethod": "GET", "id": "compute.regionCommitments.list", "parameterOrder": [ @@ -12156,25 +13472,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -12208,7 +13524,7 @@ "regionDiskTypes": { "methods": { "get": { - "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", "httpMethod": "GET", "id": "compute.regionDiskTypes.get", "parameterOrder": [ @@ -12250,7 +13566,7 @@ ] }, "list": { - "description": "Retrieves a list of regional disk types available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of regional disk types available to the specified project.", "httpMethod": "GET", "id": "compute.regionDiskTypes.list", "parameterOrder": [ @@ -12259,25 +13575,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -12311,7 +13627,7 @@ "regionDisks": { "methods": { "addResourcePolicies": { - "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", "httpMethod": "POST", "id": "compute.regionDisks.addResourcePolicies", "parameterOrder": [ @@ -12360,7 +13676,7 @@ ] }, "createSnapshot": { - "description": "Creates a snapshot of this regional disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a snapshot of this regional disk.", "httpMethod": "POST", "id": "compute.regionDisks.createSnapshot", "parameterOrder": [ @@ -12409,7 +13725,7 @@ ] }, "delete": { - "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", "httpMethod": "DELETE", "id": "compute.regionDisks.delete", "parameterOrder": [ @@ -12454,7 +13770,7 @@ ] }, "get": { - "description": "Returns a specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns a specified regional persistent disk.", "httpMethod": "GET", "id": "compute.regionDisks.get", "parameterOrder": [ @@ -12496,7 +13812,7 @@ ] }, "insert": { - "description": "Creates a persistent regional disk in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a persistent regional disk in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.regionDisks.insert", "parameterOrder": [ @@ -12542,7 +13858,7 @@ ] }, "list": { - "description": "Retrieves the list of persistent disks contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of persistent disks contained within the specified region.", "httpMethod": "GET", "id": "compute.regionDisks.list", "parameterOrder": [ @@ -12551,25 +13867,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -12599,7 +13915,7 @@ ] }, "removeResourcePolicies": { - "description": "Removes resource policies from a regional disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Removes resource policies from a regional disk.", "httpMethod": "POST", "id": "compute.regionDisks.removeResourcePolicies", "parameterOrder": [ @@ -12648,7 +13964,7 @@ ] }, "resize": { - "description": "Resizes the specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Resizes the specified regional persistent disk.", "httpMethod": "POST", "id": "compute.regionDisks.resize", "parameterOrder": [ @@ -12697,7 +14013,7 @@ ] }, "setLabels": { - "description": "Sets the labels on the target regional disk. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the labels on the target regional disk.", "httpMethod": "POST", "id": "compute.regionDisks.setLabels", "parameterOrder": [ @@ -12746,7 +14062,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.regionDisks.testIamPermissions", "parameterOrder": [ @@ -12795,7 +14111,7 @@ "regionHealthChecks": { "methods": { "delete": { - "description": "Deletes the specified HealthCheck resource. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified HealthCheck resource.", "httpMethod": "DELETE", "id": "compute.regionHealthChecks.delete", "parameterOrder": [ @@ -12841,7 +14157,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", "httpMethod": "GET", "id": "compute.regionHealthChecks.get", "parameterOrder": [ @@ -12883,7 +14199,7 @@ ] }, "insert": { - "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.regionHealthChecks.insert", "parameterOrder": [ @@ -12924,7 +14240,7 @@ ] }, "list": { - "description": "Retrieves the list of HealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of HealthCheck resources available to the specified project.", "httpMethod": "GET", "id": "compute.regionHealthChecks.list", "parameterOrder": [ @@ -12933,25 +14249,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -12981,7 +14297,7 @@ ] }, "patch": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionHealthChecks.patch", "parameterOrder": [ @@ -13030,7 +14346,7 @@ ] }, "update": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", "httpMethod": "PUT", "id": "compute.regionHealthChecks.update", "parameterOrder": [ @@ -13083,7 +14399,7 @@ "regionInstanceGroupManagers": { "methods": { "abandonInstances": { - "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.abandonInstances", "parameterOrder": [ @@ -13117,9 +14433,189 @@ "type": "string" } }, - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "request": { + "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "applyUpdatesToInstances": { + "description": "Apply updates to selected instances the managed instance group.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of 
the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "request": { + "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "createInstances": { + "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.createInstances", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + "request": { + "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "description": "Deletes the specified managed instance group and all of the instances in that group.", + "httpMethod": "DELETE", + "id": "compute.regionInstanceGroupManagers.delete", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "Name of the managed instance group to delete.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteInstances": { + "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.deleteInstances", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "Name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", "request": { - "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" }, "response": { "$ref": "Operation" @@ -13129,10 +14625,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "delete": { - "description": "Deletes the specified managed instance group and all of the instances in that group. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "DELETE", - "id": "compute.regionInstanceGroupManagers.delete", + "get": { + "description": "Returns all of the details about the specified managed instance group.", + "httpMethod": "GET", + "id": "compute.regionInstanceGroupManagers.get", "parameterOrder": [ "project", "region", @@ -13140,7 +14636,7 @@ ], "parameters": { "instanceGroupManager": { - "description": "Name of the managed instance group to delete.", + "description": "Name of the managed instance group to return.", "location": "path", "required": true, "type": "string" @@ -13157,38 +14653,27 @@ "location": "path", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", "response": { - "$ref": "Operation" + "$ref": "InstanceGroupManager" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "deleteInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + "insert": { + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.deleteInstances", + "id": "compute.regionInstanceGroupManagers.insert", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13208,9 +14693,9 @@ "type": "string" } }, - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + "path": "{project}/regions/{region}/instanceGroupManagers", "request": { - "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + "$ref": "InstanceGroupManager" }, "response": { "$ref": "Operation" @@ -13220,55 +14705,38 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns all of the details about the specified managed instance group. 
(== suppress_warning http-rest-shadowed ==)", + "list": { + "description": "Retrieves the list of managed instance groups that are contained within the specified region.", "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.get", + "id": "compute.regionInstanceGroupManagers.list", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group to return.", - "location": "path", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "required": true, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" - } - }, - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "response": { - "$ref": "InstanceGroupManager" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13281,54 +14749,54 @@ "location": "path", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, "path": "{project}/regions/{region}/instanceGroupManagers", - "request": { - "$ref": "InstanceGroupManager" - }, "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupManagerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "description": "Retrieves the list of managed instance groups that are contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + "listErrors": { + "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.list", + "id": "compute.regionInstanceGroupManagers.listErrors", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", + "location": "path", + "required": true, + "type": "string" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -13340,15 +14808,15 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request. This should conform to RFC1035.", "location": "path", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/instanceGroupManagers", + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", "response": { - "$ref": "RegionInstanceGroupManagerList" + "$ref": "RegionInstanceGroupManagersListErrorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13357,7 +14825,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -13367,7 +14835,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, @@ -13379,19 +14847,19 @@ }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, - "order_by": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -13420,7 +14888,7 @@ ] }, "patch": { - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionInstanceGroupManagers.patch", "parameterOrder": [ @@ -13467,7 +14935,7 @@ ] }, "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.recreateInstances", "parameterOrder": [ @@ -13514,7 +14982,7 @@ ] }, "resize": { - "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.resize", "parameterOrder": [ @@ -13567,7 +15035,7 @@ ] }, "setInstanceTemplate": { - "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", "parameterOrder": [ @@ -13614,7 +15082,7 @@ ] }, "setTargetPools": { - "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected. (== suppress_warning http-rest-shadowed ==)", + "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.setTargetPools", "parameterOrder": [ @@ -13665,7 +15133,7 @@ "regionInstanceGroups": { "methods": { "get": { - "description": "Returns the specified instance group resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified instance group resource.", "httpMethod": "GET", "id": "compute.regionInstanceGroups.get", "parameterOrder": [ @@ -13705,7 +15173,7 @@ ] }, "list": { - "description": "Retrieves the list of instance group resources contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of instance group resources contained within the specified region.", "httpMethod": "GET", "id": "compute.regionInstanceGroups.list", "parameterOrder": [ @@ -13714,25 +15182,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -13761,7 +15229,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", "httpMethod": "POST", "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ @@ -13771,7 +15239,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, @@ -13783,19 +15251,209 @@ }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + "request": { + "$ref": "RegionInstanceGroupsListInstancesRequest" + }, + "response": { + "$ref": "RegionInstanceGroupsListInstances" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setNamedPorts": { + "description": "Sets the named ports for the specified regional instance group.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroups.setNamedPorts", + "parameterOrder": [ + "project", + "region", + "instanceGroup" + ], + "parameters": { + "instanceGroup": { + "description": "The name of the regional instance group where the named ports are updated.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to 
identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + "request": { + "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "description": "Deletes the specified region-specific Operations resource.", + "httpMethod": "DELETE", + "id": "compute.regionOperations.delete", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves the specified region-specific Operations resource.", + "httpMethod": "GET", + "id": "compute.regionOperations.get", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of Operation resources contained within the specified region.", + "httpMethod": "GET", + "id": "compute.regionOperations.list", + "parameterOrder": [ 
+ "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -13807,18 +15465,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - "request": { - "$ref": "RegionInstanceGroupsListInstancesRequest" - }, + "path": "{project}/regions/{region}/operations", "response": { - "$ref": "RegionInstanceGroupsListInstances" + "$ref": "OperationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13826,99 +15482,10 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setNamedPorts": { - "description": "Sets the named ports for the specified regional instance group. (== suppress_warning http-rest-shadowed ==)", + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", "httpMethod": "POST", - "id": "compute.regionInstanceGroups.setNamedPorts", - "parameterOrder": [ - "project", - "region", - "instanceGroup" - ], - "parameters": { - "instanceGroup": { - "description": "The name of the regional instance group where the named ports are updated.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - "request": { - "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionOperations": { - "methods": { - "delete": { - "description": "Deletes the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "DELETE", - "id": "compute.regionOperations.delete", - "parameterOrder": [ - "project", - "region", - "operation" - ], - "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/operations/{operation}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Retrieves the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.regionOperations.get", + "id": "compute.regionOperations.wait", "parameterOrder": [ "project", "region", @@ -13947,7 +15514,7 @@ "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/operations/{operation}/wait", "response": { "$ref": "Operation" }, @@ -13956,70 +15523,13 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "list": { - "description": "Retrieves a list of Operation resources contained within the specified region. (== suppress_warning http-rest-shadowed ==)", - "httpMethod": "GET", - "id": "compute.regionOperations.list", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/operations", - "response": { - "$ref": "OperationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] } } }, "regionSslCertificates": { "methods": { "delete": { - "description": "Deletes the specified SslCertificate resource in the region. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified SslCertificate resource in the region.", "httpMethod": "DELETE", "id": "compute.regionSslCertificates.delete", "parameterOrder": [ @@ -14065,7 +15575,7 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified SslCertificate resource in the specified region. 
Get a list of available SSL certificates by making a list() request.", "httpMethod": "GET", "id": "compute.regionSslCertificates.get", "parameterOrder": [ @@ -14107,7 +15617,7 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", "httpMethod": "POST", "id": "compute.regionSslCertificates.insert", "parameterOrder": [ @@ -14148,7 +15658,7 @@ ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", "httpMethod": "GET", "id": "compute.regionSslCertificates.list", "parameterOrder": [ @@ -14157,25 +15667,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -14209,7 +15719,7 @@ "regionTargetHttpProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetHttpProxy resource.", "httpMethod": "DELETE", "id": "compute.regionTargetHttpProxies.delete", "parameterOrder": [ @@ -14255,7 +15765,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", "httpMethod": "GET", "id": "compute.regionTargetHttpProxies.get", "parameterOrder": [ @@ -14297,7 +15807,7 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.regionTargetHttpProxies.insert", "parameterOrder": [ @@ -14338,7 +15848,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", "httpMethod": "GET", "id": "compute.regionTargetHttpProxies.list", "parameterOrder": [ @@ -14347,25 +15857,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -14395,7 +15905,7 @@ ] }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", "id": "compute.regionTargetHttpProxies.setUrlMap", "parameterOrder": [ @@ -14448,7 +15958,7 @@ "regionTargetHttpsProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetHttpsProxy resource.", "httpMethod": "DELETE", "id": "compute.regionTargetHttpsProxies.delete", "parameterOrder": [ @@ -14494,7 +16004,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", "httpMethod": "GET", "id": "compute.regionTargetHttpsProxies.get", "parameterOrder": [ @@ -14536,7 +16046,7 @@ ] }, "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.regionTargetHttpsProxies.insert", "parameterOrder": [ @@ -14577,7 +16087,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", "httpMethod": "GET", "id": "compute.regionTargetHttpsProxies.list", "parameterOrder": [ @@ -14586,25 +16096,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -14634,7 +16144,7 @@ ] }, "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Replaces SslCertificates for TargetHttpsProxy.", "httpMethod": "POST", "id": "compute.regionTargetHttpsProxies.setSslCertificates", "parameterOrder": [ @@ -14683,7 +16193,7 @@ ] }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the URL map for TargetHttpsProxy.", "httpMethod": "POST", "id": "compute.regionTargetHttpsProxies.setUrlMap", "parameterOrder": [ @@ -14736,7 +16246,7 @@ "regionUrlMaps": { "methods": { "delete": { - "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified UrlMap resource.", "httpMethod": "DELETE", "id": "compute.regionUrlMaps.delete", "parameterOrder": [ @@ -14782,7 +16292,7 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", "httpMethod": "GET", "id": "compute.regionUrlMaps.get", "parameterOrder": [ @@ -14824,7 +16334,7 @@ ] }, "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.regionUrlMaps.insert", "parameterOrder": [ @@ -14865,7 +16375,7 @@ ] }, "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", "httpMethod": "GET", "id": "compute.regionUrlMaps.list", "parameterOrder": [ @@ -14874,25 +16384,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -14922,7 +16432,7 @@ ] }, "patch": { - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionUrlMaps.patch", "parameterOrder": [ @@ -14971,7 +16481,7 @@ ] }, "update": { - "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified UrlMap resource with the data included in the request.", "httpMethod": "PUT", "id": "compute.regionUrlMaps.update", "parameterOrder": [ @@ -15020,7 +16530,7 @@ ] }, "validate": { - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==)", + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", "httpMethod": "POST", "id": "compute.regionUrlMaps.validate", "parameterOrder": [ @@ -15068,7 +16578,7 @@ "regions": { "methods": { "get": { - "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Region resource. 
Gets a list of available regions by making a list() request.", "httpMethod": "GET", "id": "compute.regions.get", "parameterOrder": [ @@ -15102,7 +16612,7 @@ ] }, "list": { - "description": "Retrieves the list of region resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of region resources available to the specified project.", "httpMethod": "GET", "id": "compute.regions.list", "parameterOrder": [ @@ -15110,25 +16620,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -15155,7 +16665,7 @@ "reservations": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of reservations. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of reservations.", "httpMethod": "GET", "id": "compute.reservations.aggregatedList", "parameterOrder": [ @@ -15163,25 +16673,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -15204,7 +16719,7 @@ ] }, "delete": { - "description": "Deletes the specified reservation. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified reservation.", "httpMethod": "DELETE", "id": "compute.reservations.delete", "parameterOrder": [ @@ -15250,7 +16765,7 @@ ] }, "get": { - "description": "Retrieves information about the specified reservation. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves information about the specified reservation.", "httpMethod": "GET", "id": "compute.reservations.get", "parameterOrder": [ @@ -15292,7 +16807,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.reservations.getIamPolicy", "parameterOrder": [ @@ -15334,7 +16849,7 @@ ] }, "insert": { - "description": "Creates a new reservation. For more information, read Reserving zonal resources. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a new reservation. For more information, read Reserving zonal resources.", "httpMethod": "POST", "id": "compute.reservations.insert", "parameterOrder": [ @@ -15375,7 +16890,7 @@ ] }, "list": { - "description": "A list of all the reservations that have been configured for the specified project in specified zone. (== suppress_warning http-rest-shadowed ==)", + "description": "A list of all the reservations that have been configured for the specified project in specified zone.", "httpMethod": "GET", "id": "compute.reservations.list", "parameterOrder": [ @@ -15384,25 +16899,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -15432,7 +16947,7 @@ ] }, "resize": { - "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations.", "httpMethod": "POST", "id": "compute.reservations.resize", "parameterOrder": [ @@ -15481,7 +16996,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.reservations.setIamPolicy", "parameterOrder": [ @@ -15525,7 +17040,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.reservations.testIamPermissions", "parameterOrder": [ @@ -15574,7 +17089,7 @@ "resourcePolicies": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of resource policies. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of resource policies.", "httpMethod": "GET", "id": "compute.resourcePolicies.aggregatedList", "parameterOrder": [ @@ -15582,25 +17097,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -15623,7 +17143,7 @@ ] }, "delete": { - "description": "Deletes the specified resource policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified resource policy.", "httpMethod": "DELETE", "id": "compute.resourcePolicies.delete", "parameterOrder": [ @@ -15669,7 +17189,7 @@ ] }, "get": { - "description": "Retrieves all information of the specified resource policy. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves all information of the specified resource policy.", "httpMethod": "GET", "id": "compute.resourcePolicies.get", "parameterOrder": [ @@ -15711,7 +17231,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.resourcePolicies.getIamPolicy", "parameterOrder": [ @@ -15753,7 +17273,7 @@ ] }, "insert": { - "description": "Creates a new resource policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a new resource policy.", "httpMethod": "POST", "id": "compute.resourcePolicies.insert", "parameterOrder": [ @@ -15794,7 +17314,7 @@ ] }, "list": { - "description": "A list all the resource policies that have been configured for the specified project in specified region. (== suppress_warning http-rest-shadowed ==)", + "description": "A list all the resource policies that have been configured for the specified project in specified region.", "httpMethod": "GET", "id": "compute.resourcePolicies.list", "parameterOrder": [ @@ -15803,25 +17323,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -15851,7 +17371,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.resourcePolicies.setIamPolicy", "parameterOrder": [ @@ -15895,7 +17415,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.resourcePolicies.testIamPermissions", "parameterOrder": [ @@ -15944,7 +17464,7 @@ "routers": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of routers. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of routers.", "httpMethod": "GET", "id": "compute.routers.aggregatedList", "parameterOrder": [ @@ -15952,25 +17472,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -15993,7 +17518,7 @@ ] }, "delete": { - "description": "Deletes the specified Router resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified Router resource.", "httpMethod": "DELETE", "id": "compute.routers.delete", "parameterOrder": [ @@ -16039,7 +17564,7 @@ ] }, "get": { - "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", "httpMethod": "GET", "id": "compute.routers.get", "parameterOrder": [ @@ -16081,7 +17606,7 @@ ] }, "getNatMappingInfo": { - "description": "Retrieves runtime Nat mapping information of VM endpoints. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves runtime Nat mapping information of VM endpoints.", "httpMethod": "GET", "id": "compute.routers.getNatMappingInfo", "parameterOrder": [ @@ -16091,25 +17616,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -16146,7 +17671,7 @@ ] }, "getRouterStatus": { - "description": "Retrieves runtime information of the specified router. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves runtime information of the specified router.", "httpMethod": "GET", "id": "compute.routers.getRouterStatus", "parameterOrder": [ @@ -16188,7 +17713,7 @@ ] }, "insert": { - "description": "Creates a Router resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a Router resource in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.routers.insert", "parameterOrder": [ @@ -16229,7 +17754,7 @@ ] }, "list": { - "description": "Retrieves a list of Router resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of Router resources available to the specified project.", "httpMethod": "GET", "id": "compute.routers.list", "parameterOrder": [ @@ -16238,25 +17763,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -16286,7 +17811,7 @@ ] }, "patch": { - "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.routers.patch", "parameterOrder": [ @@ -16335,7 +17860,7 @@ ] }, "preview": { - "description": "Preview fields auto-generated during router create and update operations. 
Calling this method does NOT create or update the router. (== suppress_warning http-rest-shadowed ==)", + "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", "httpMethod": "POST", "id": "compute.routers.preview", "parameterOrder": [ @@ -16380,7 +17905,7 @@ ] }, "update": { - "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload.", "httpMethod": "PUT", "id": "compute.routers.update", "parameterOrder": [ @@ -16433,7 +17958,7 @@ "routes": { "methods": { "delete": { - "description": "Deletes the specified Route resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified Route resource.", "httpMethod": "DELETE", "id": "compute.routes.delete", "parameterOrder": [ @@ -16471,7 +17996,7 @@ ] }, "get": { - "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", "httpMethod": "GET", "id": "compute.routes.get", "parameterOrder": [ @@ -16505,7 +18030,7 @@ ] }, "insert": { - "description": "Creates a Route resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a Route resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.routes.insert", "parameterOrder": [ @@ -16538,7 +18063,7 @@ ] }, "list": { - "description": "Retrieves the list of Route resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of Route resources available to the specified project.", "httpMethod": "GET", "id": "compute.routes.list", "parameterOrder": [ @@ -16546,25 +18071,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -16591,7 +18116,7 @@ "securityPolicies": { "methods": { "addRule": { - "description": "Inserts a rule into a security policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Inserts a rule into a security policy.", "httpMethod": "POST", "id": "compute.securityPolicies.addRule", "parameterOrder": [ @@ -16627,7 +18152,7 @@ ] }, "delete": { - "description": "Deletes the specified policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified policy.", "httpMethod": "DELETE", "id": "compute.securityPolicies.delete", "parameterOrder": [ @@ -16665,7 +18190,7 @@ ] }, "get": { - "description": "List all of the ordered rules present in a single specified policy. (== suppress_warning http-rest-shadowed ==)", + "description": "List all of the ordered rules present in a single specified policy.", "httpMethod": "GET", "id": "compute.securityPolicies.get", "parameterOrder": [ @@ -16699,7 +18224,7 @@ ] }, "getRule": { - "description": "Gets a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets a rule at the specified priority.", "httpMethod": "GET", "id": "compute.securityPolicies.getRule", "parameterOrder": [ @@ -16739,7 +18264,7 @@ ] }, "insert": { - "description": "Creates a new policy in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a new policy in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.securityPolicies.insert", "parameterOrder": [ @@ -16772,7 +18297,7 @@ ] }, "list": { - "description": "List all the policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "List all the policies that have been configured for the specified project.", "httpMethod": "GET", "id": "compute.securityPolicies.list", "parameterOrder": [ @@ -16780,25 +18305,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -16820,8 +18345,56 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listPreconfiguredExpressionSets": { + "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", + "httpMethod": "GET", + "id": "compute.securityPolicies.listPreconfiguredExpressionSets", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", + "response": { + "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "patch": { - "description": "Patches the specified policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified policy with the data included in the request.", "httpMethod": "PATCH", "id": "compute.securityPolicies.patch", "parameterOrder": [ @@ -16862,7 +18435,7 @@ ] }, "patchRule": { - "description": "Patches a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches a rule at the specified priority.", "httpMethod": "POST", "id": "compute.securityPolicies.patchRule", "parameterOrder": [ @@ -16904,7 +18477,7 @@ ] }, "removeRule": { - "description": "Deletes a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes a rule at the specified priority.", "httpMethod": "POST", "id": "compute.securityPolicies.removeRule", "parameterOrder": [ @@ -16947,7 +18520,7 @@ "snapshots": { "methods": { "delete": { - "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", "httpMethod": "DELETE", "id": "compute.snapshots.delete", "parameterOrder": [ @@ -16985,7 +18558,7 @@ ] }, "get": { - "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", "httpMethod": "GET", "id": "compute.snapshots.get", "parameterOrder": [ @@ -17019,7 +18592,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.snapshots.getIamPolicy", "parameterOrder": [ @@ -17053,7 +18626,7 @@ ] }, "list": { - "description": "Retrieves the list of Snapshot resources contained within the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of Snapshot resources contained within the specified project.", "httpMethod": "GET", "id": "compute.snapshots.list", "parameterOrder": [ @@ -17061,25 +18634,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17102,7 +18675,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", "id": "compute.snapshots.setIamPolicy", "parameterOrder": [ @@ -17138,7 +18711,7 @@ ] }, "setLabels": { - "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", "id": "compute.snapshots.setLabels", "parameterOrder": [ @@ -17174,7 +18747,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.snapshots.testIamPermissions", "parameterOrder": [ @@ -17215,7 +18788,7 @@ "sslCertificates": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project.", "httpMethod": "GET", "id": "compute.sslCertificates.aggregatedList", "parameterOrder": [ @@ -17223,25 +18796,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17264,7 +18842,7 @@ ] }, "delete": { - "description": "Deletes the specified SslCertificate resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified SslCertificate resource.", "httpMethod": "DELETE", "id": "compute.sslCertificates.delete", "parameterOrder": [ @@ -17302,7 +18880,7 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", "httpMethod": "GET", "id": "compute.sslCertificates.get", "parameterOrder": [ @@ -17336,7 +18914,7 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.sslCertificates.insert", "parameterOrder": [ @@ -17369,7 +18947,7 @@ ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of SslCertificate resources available to the specified project.", "httpMethod": "GET", "id": "compute.sslCertificates.list", "parameterOrder": [ @@ -17377,25 +18955,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17422,7 +19000,7 @@ "sslPolicies": { "methods": { "delete": { - "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", "httpMethod": "DELETE", "id": "compute.sslPolicies.delete", "parameterOrder": [ @@ -17459,7 +19037,7 @@ ] }, "get": { - "description": "Lists all of the ordered rules present in a single specified policy. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists all of the ordered rules present in a single specified policy.", "httpMethod": "GET", "id": "compute.sslPolicies.get", "parameterOrder": [ @@ -17492,7 +19070,7 @@ ] }, "insert": { - "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", "httpMethod": "POST", "id": "compute.sslPolicies.insert", "parameterOrder": [ @@ -17525,7 +19103,7 @@ ] }, "list": { - "description": "Lists all the SSL policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists all the SSL policies that have been configured for the specified project.", "httpMethod": "GET", "id": "compute.sslPolicies.list", "parameterOrder": [ @@ -17533,25 +19111,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17574,7 +19152,7 @@ ] }, "listAvailableFeatures": { - "description": "Lists all features that can be specified in the SSL policy when using custom profile. (== suppress_warning http-rest-shadowed ==)", + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", "httpMethod": "GET", "id": "compute.sslPolicies.listAvailableFeatures", "parameterOrder": [ @@ -17582,25 +19160,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17623,7 +19201,7 @@ ] }, "patch": { - "description": "Patches the specified SSL policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified SSL policy with the data included in the request.", "httpMethod": "PATCH", "id": "compute.sslPolicies.patch", "parameterOrder": [ @@ -17667,7 +19245,7 @@ "subnetworks": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of subnetworks. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of subnetworks.", "httpMethod": "GET", "id": "compute.subnetworks.aggregatedList", "parameterOrder": [ @@ -17675,25 +19253,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17716,7 +19299,7 @@ ] }, "delete": { - "description": "Deletes the specified subnetwork. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified subnetwork.", "httpMethod": "DELETE", "id": "compute.subnetworks.delete", "parameterOrder": [ @@ -17762,7 +19345,7 @@ ] }, "expandIpCidrRange": { - "description": "Expands the IP CIDR range of the subnetwork to a specified value. (== suppress_warning http-rest-shadowed ==)", + "description": "Expands the IP CIDR range of the subnetwork to a specified value.", "httpMethod": "POST", "id": "compute.subnetworks.expandIpCidrRange", "parameterOrder": [ @@ -17811,7 +19394,7 @@ ] }, "get": { - "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified subnetwork. Gets a list of available subnetworks by making a list() request.", "httpMethod": "GET", "id": "compute.subnetworks.get", "parameterOrder": [ @@ -17853,7 +19436,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", "id": "compute.subnetworks.getIamPolicy", "parameterOrder": [ @@ -17895,7 +19478,7 @@ ] }, "insert": { - "description": "Creates a subnetwork in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a subnetwork in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.subnetworks.insert", "parameterOrder": [ @@ -17936,7 +19519,7 @@ ] }, "list": { - "description": "Retrieves a list of subnetworks available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of subnetworks available to the specified project.", "httpMethod": "GET", "id": "compute.subnetworks.list", "parameterOrder": [ @@ -17945,25 +19528,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -17993,7 +19576,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", "httpMethod": "GET", "id": "compute.subnetworks.listUsable", "parameterOrder": [ @@ -18001,25 +19584,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -18042,7 +19625,7 @@ ] }, "patch": { - "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can be updated with a patch request as indicated in the field descriptions. You must specify the current fingerprint of the subnetwork resource being patched.", "httpMethod": "PATCH", "id": "compute.subnetworks.patch", "parameterOrder": [ @@ -18097,7 +19680,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", "httpMethod": "POST", "id": "compute.subnetworks.setIamPolicy", "parameterOrder": [ @@ -18141,7 +19724,7 @@ ] }, "setPrivateIpGoogleAccess": { - "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access. (== suppress_warning http-rest-shadowed ==)", + "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", "httpMethod": "POST", "id": "compute.subnetworks.setPrivateIpGoogleAccess", "parameterOrder": [ @@ -18190,7 +19773,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.subnetworks.testIamPermissions", "parameterOrder": [ @@ -18239,7 +19822,7 @@ "targetHttpProxies": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project.", "httpMethod": "GET", "id": "compute.targetHttpProxies.aggregatedList", "parameterOrder": [ @@ -18247,25 +19830,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -18288,7 +19876,7 @@ ] }, "delete": { - "description": "Deletes the specified TargetHttpProxy resource. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetHttpProxy resource.", "httpMethod": "DELETE", "id": "compute.targetHttpProxies.delete", "parameterOrder": [ @@ -18326,7 +19914,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", "httpMethod": "GET", "id": "compute.targetHttpProxies.get", "parameterOrder": [ @@ -18360,7 +19948,7 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.targetHttpProxies.insert", "parameterOrder": [ @@ -18393,7 +19981,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", "httpMethod": "GET", "id": "compute.targetHttpProxies.list", "parameterOrder": [ @@ -18401,25 +19989,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -18442,7 +20030,7 @@ ] }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", "id": "compute.targetHttpProxies.setUrlMap", "parameterOrder": [ @@ -18487,7 +20075,7 @@ "targetHttpsProxies": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project.", "httpMethod": "GET", "id": "compute.targetHttpsProxies.aggregatedList", "parameterOrder": [ @@ -18495,25 +20083,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -18536,7 +20129,7 @@ ] }, "delete": { - "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetHttpsProxy resource.", "httpMethod": "DELETE", "id": "compute.targetHttpsProxies.delete", "parameterOrder": [ @@ -18574,7 +20167,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", "httpMethod": "GET", "id": "compute.targetHttpsProxies.get", "parameterOrder": [ @@ -18608,7 +20201,7 @@ ] }, "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.targetHttpsProxies.insert", "parameterOrder": [ @@ -18641,7 +20234,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", "httpMethod": "GET", "id": "compute.targetHttpsProxies.list", "parameterOrder": [ @@ -18649,25 +20242,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -18690,7 +20283,7 @@ ] }, "setQuicOverride": { - "description": "Sets the QUIC override policy for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the QUIC override policy for TargetHttpsProxy.", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setQuicOverride", "parameterOrder": [ @@ -18730,7 +20323,7 @@ ] }, "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Replaces SslCertificates for TargetHttpsProxy.", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setSslCertificates", "parameterOrder": [ @@ -18771,7 +20364,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setSslPolicy", "parameterOrder": [ @@ -18811,7 +20404,7 @@ ] }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the URL map for TargetHttpsProxy.", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setUrlMap", "parameterOrder": [ @@ -18856,7 +20449,7 @@ "targetInstances": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target instances. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of target instances.", "httpMethod": "GET", "id": "compute.targetInstances.aggregatedList", "parameterOrder": [ @@ -18864,25 +20457,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -18905,7 +20503,7 @@ ] }, "delete": { - "description": "Deletes the specified TargetInstance resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetInstance resource.", "httpMethod": "DELETE", "id": "compute.targetInstances.delete", "parameterOrder": [ @@ -18951,7 +20549,7 @@ ] }, "get": { - "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", "httpMethod": "GET", "id": "compute.targetInstances.get", "parameterOrder": [ @@ -18993,7 +20591,7 @@ ] }, "insert": { - "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", "httpMethod": "POST", "id": "compute.targetInstances.insert", "parameterOrder": [ @@ -19034,7 +20632,7 @@ ] }, "list": { - "description": "Retrieves a list of TargetInstance resources available to the specified project and zone. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", "httpMethod": "GET", "id": "compute.targetInstances.list", "parameterOrder": [ @@ -19043,25 +20641,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -19095,7 +20693,7 @@ "targetPools": { "methods": { "addHealthCheck": { - "description": "Adds health check URLs to a target pool. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds health check URLs to a target pool.", "httpMethod": "POST", "id": "compute.targetPools.addHealthCheck", "parameterOrder": [ @@ -19144,7 +20742,7 @@ ] }, "addInstance": { - "description": "Adds an instance to a target pool. (== suppress_warning http-rest-shadowed ==)", + "description": "Adds an instance to a target pool.", "httpMethod": "POST", "id": "compute.targetPools.addInstance", "parameterOrder": [ @@ -19193,7 +20791,7 @@ ] }, "aggregatedList": { - "description": "Retrieves an aggregated list of target pools. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of target pools.", "httpMethod": "GET", "id": "compute.targetPools.aggregatedList", "parameterOrder": [ @@ -19201,25 +20799,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -19242,7 +20845,7 @@ ] }, "delete": { - "description": "Deletes the specified target pool. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified target pool.", "httpMethod": "DELETE", "id": "compute.targetPools.delete", "parameterOrder": [ @@ -19288,7 +20891,7 @@ ] }, "get": { - "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", "httpMethod": "GET", "id": "compute.targetPools.get", "parameterOrder": [ @@ -19330,7 +20933,7 @@ ] }, "getHealth": { - "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool. (== suppress_warning http-rest-shadowed ==)", + "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", "httpMethod": "POST", "id": "compute.targetPools.getHealth", "parameterOrder": [ @@ -19375,7 +20978,7 @@ ] }, "insert": { - "description": "Creates a target pool in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a target pool in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.targetPools.insert", "parameterOrder": [ @@ -19416,7 +21019,7 @@ ] }, "list": { - "description": "Retrieves a list of target pools available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of target pools available to the specified project and region.", "httpMethod": "GET", "id": "compute.targetPools.list", "parameterOrder": [ @@ -19425,25 +21028,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -19473,7 +21076,7 @@ ] }, "removeHealthCheck": { - "description": "Removes health check URL from a target pool. (== suppress_warning http-rest-shadowed ==)", + "description": "Removes health check URL from a target pool.", "httpMethod": "POST", "id": "compute.targetPools.removeHealthCheck", "parameterOrder": [ @@ -19522,7 +21125,7 @@ ] }, "removeInstance": { - "description": "Removes instance URL from a target pool. (== suppress_warning http-rest-shadowed ==)", + "description": "Removes instance URL from a target pool.", "httpMethod": "POST", "id": "compute.targetPools.removeInstance", "parameterOrder": [ @@ -19571,7 +21174,7 @@ ] }, "setBackup": { - "description": "Changes a backup target pool's configurations. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes a backup target pool's configurations.", "httpMethod": "POST", "id": "compute.targetPools.setBackup", "parameterOrder": [ @@ -19630,7 +21233,7 @@ "targetSslProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetSslProxy resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetSslProxy resource.", "httpMethod": "DELETE", "id": "compute.targetSslProxies.delete", "parameterOrder": [ @@ -19668,7 +21271,7 @@ ] }, "get": { - "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", "httpMethod": "GET", "id": "compute.targetSslProxies.get", "parameterOrder": [ @@ -19702,7 +21305,7 @@ ] }, "insert": { - "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.targetSslProxies.insert", "parameterOrder": [ @@ -19735,7 +21338,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetSslProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", "httpMethod": "GET", "id": "compute.targetSslProxies.list", "parameterOrder": [ @@ -19743,25 +21346,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -19784,7 +21387,7 @@ ] }, "setBackendService": { - "description": "Changes the BackendService for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the BackendService for TargetSslProxy.", "httpMethod": "POST", "id": "compute.targetSslProxies.setBackendService", "parameterOrder": [ @@ -19825,7 +21428,7 @@ ] }, "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the ProxyHeaderType for TargetSslProxy.", "httpMethod": "POST", "id": "compute.targetSslProxies.setProxyHeader", "parameterOrder": [ @@ -19866,7 +21469,7 @@ ] }, "setSslCertificates": { - "description": "Changes SslCertificates for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes SslCertificates for TargetSslProxy.", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslCertificates", "parameterOrder": [ @@ -19907,7 +21510,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", "parameterOrder": [ @@ -19951,7 +21554,7 @@ "targetTcpProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetTcpProxy resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified TargetTcpProxy resource.", "httpMethod": "DELETE", "id": "compute.targetTcpProxies.delete", "parameterOrder": [ @@ -19989,7 +21592,7 @@ ] }, "get": { - "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", "httpMethod": "GET", "id": "compute.targetTcpProxies.get", "parameterOrder": [ @@ -20023,7 +21626,7 @@ ] }, "insert": { - "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.targetTcpProxies.insert", "parameterOrder": [ @@ -20056,7 +21659,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetTcpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", "httpMethod": "GET", "id": "compute.targetTcpProxies.list", "parameterOrder": [ @@ -20064,25 +21667,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -20105,7 +21708,7 @@ ] }, "setBackendService": { - "description": "Changes the BackendService for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the BackendService for TargetTcpProxy.", "httpMethod": "POST", "id": "compute.targetTcpProxies.setBackendService", "parameterOrder": [ @@ -20146,7 +21749,7 @@ ] }, "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", + "description": "Changes the ProxyHeaderType for TargetTcpProxy.", "httpMethod": "POST", "id": "compute.targetTcpProxies.setProxyHeader", "parameterOrder": [ @@ -20191,7 +21794,7 @@ "targetVpnGateways": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target VPN gateways. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of target VPN gateways.", "httpMethod": "GET", "id": "compute.targetVpnGateways.aggregatedList", "parameterOrder": [ @@ -20199,25 +21802,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -20240,7 +21848,7 @@ ] }, "delete": { - "description": "Deletes the specified target VPN gateway. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified target VPN gateway.", "httpMethod": "DELETE", "id": "compute.targetVpnGateways.delete", "parameterOrder": [ @@ -20286,7 +21894,7 @@ ] }, "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", "httpMethod": "GET", "id": "compute.targetVpnGateways.get", "parameterOrder": [ @@ -20328,7 +21936,7 @@ ] }, "insert": { - "description": "Creates a target VPN gateway in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.targetVpnGateways.insert", "parameterOrder": [ @@ -20369,7 +21977,7 @@ ] }, "list": { - "description": "Retrieves a list of target VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of target VPN gateways available to the specified project and region.", "httpMethod": "GET", "id": "compute.targetVpnGateways.list", "parameterOrder": [ @@ -20378,25 +21986,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -20430,7 +22038,7 @@ "urlMaps": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project.", "httpMethod": "GET", "id": "compute.urlMaps.aggregatedList", "parameterOrder": [ @@ -20438,25 +22046,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -20479,7 +22092,7 @@ ] }, "delete": { - "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified UrlMap resource.", "httpMethod": "DELETE", "id": "compute.urlMaps.delete", "parameterOrder": [ @@ -20517,7 +22130,7 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", "httpMethod": "GET", "id": "compute.urlMaps.get", "parameterOrder": [ @@ -20551,7 +22164,7 @@ ] }, "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", "httpMethod": "POST", "id": "compute.urlMaps.insert", "parameterOrder": [ @@ -20584,7 +22197,7 @@ ] }, "invalidateCache": { - "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap. (== suppress_warning http-rest-shadowed ==)", + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", "httpMethod": "POST", "id": "compute.urlMaps.invalidateCache", "parameterOrder": [ @@ -20625,7 +22238,7 @@ ] }, "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of UrlMap resources available to the specified project.", "httpMethod": "GET", "id": "compute.urlMaps.list", "parameterOrder": [ @@ -20633,25 +22246,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -20674,7 +22287,7 @@ ] }, "patch": { - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.urlMaps.patch", "parameterOrder": [ @@ -20715,7 +22328,7 @@ ] }, "update": { - "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Updates the specified UrlMap resource with the data included in the request.", "httpMethod": "PUT", "id": "compute.urlMaps.update", "parameterOrder": [ @@ -20756,7 +22369,7 @@ ] }, "validate": { - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==)", + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", "httpMethod": "POST", "id": "compute.urlMaps.validate", "parameterOrder": [ @@ -20796,7 +22409,7 @@ "vpnGateways": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of VPN gateways. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of VPN gateways.", "httpMethod": "GET", "id": "compute.vpnGateways.aggregatedList", "parameterOrder": [ @@ -20804,25 +22417,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -20845,7 +22463,7 @@ ] }, "delete": { - "description": "Deletes the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified VPN gateway.", "httpMethod": "DELETE", "id": "compute.vpnGateways.delete", "parameterOrder": [ @@ -20891,7 +22509,7 @@ ] }, "get": { - "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", "httpMethod": "GET", "id": "compute.vpnGateways.get", "parameterOrder": [ @@ -20933,7 +22551,7 @@ ] }, "getStatus": { - "description": "Returns the status for the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the status for the specified VPN gateway.", "httpMethod": "GET", "id": "compute.vpnGateways.getStatus", "parameterOrder": [ @@ -20975,7 +22593,7 @@ ] }, "insert": { - "description": "Creates a VPN gateway in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.vpnGateways.insert", "parameterOrder": [ @@ -21016,7 +22634,7 @@ ] }, "list": { - "description": "Retrieves a list of VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of VPN gateways available to the specified project and region.", "httpMethod": "GET", "id": "compute.vpnGateways.list", "parameterOrder": [ @@ -21025,25 +22643,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -21073,7 +22691,7 @@ ] }, "setLabels": { - "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", "id": "compute.vpnGateways.setLabels", "parameterOrder": [ @@ -21122,7 +22740,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", "id": "compute.vpnGateways.testIamPermissions", "parameterOrder": [ @@ -21171,7 +22789,7 @@ "vpnTunnels": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of VPN tunnels. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves an aggregated list of VPN tunnels.", "httpMethod": "GET", "id": "compute.vpnTunnels.aggregatedList", "parameterOrder": [ @@ -21179,25 +22797,30 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -21220,7 +22843,7 @@ ] }, "delete": { - "description": "Deletes the specified VpnTunnel resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified VpnTunnel resource.", "httpMethod": "DELETE", "id": "compute.vpnTunnels.delete", "parameterOrder": [ @@ -21266,7 +22889,7 @@ ] }, "get": { - "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", "httpMethod": "GET", "id": "compute.vpnTunnels.get", "parameterOrder": [ @@ -21308,7 +22931,7 @@ ] }, "insert": { - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", "httpMethod": "POST", "id": "compute.vpnTunnels.insert", "parameterOrder": [ @@ -21349,7 +22972,7 @@ ] }, "list": { - "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", "httpMethod": "GET", "id": "compute.vpnTunnels.list", "parameterOrder": [ @@ -21358,25 +22981,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -21410,7 +23033,7 @@ "zoneOperations": { "methods": { "delete": { - "description": "Deletes the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Deletes the specified zone-specific Operations resource.", "httpMethod": "DELETE", "id": "compute.zoneOperations.delete", "parameterOrder": [ @@ -21448,7 +23071,7 @@ ] }, "get": { - "description": "Retrieves the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the specified zone-specific Operations resource.", "httpMethod": "GET", "id": "compute.zoneOperations.get", "parameterOrder": [ @@ -21490,7 +23113,7 @@ ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves a list of Operation resources contained within the specified zone.", "httpMethod": "GET", "id": "compute.zoneOperations.list", "parameterOrder": [ @@ -21499,25 +23122,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -21545,13 +23168,55 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. 
Be prepared to retry if the operation is not `DONE`.", + "httpMethod": "POST", + "id": "compute.zoneOperations.wait", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/operations/{operation}/wait", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, "zones": { "methods": { "get": { - "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", "httpMethod": "GET", "id": "compute.zones.get", "parameterOrder": [ @@ -21585,7 +23250,7 @@ ] }, "list": { - "description": "Retrieves the list of Zone resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "description": "Retrieves the list of Zone resources available to the specified project.", "httpMethod": "GET", "id": "compute.zones.list", "parameterOrder": [ @@ -21593,25 +23258,25 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", "type": "string" }, "maxResults": { "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", "format": "uint32", "location": "query", "minimum": "0", "type": "integer" }, "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, @@ -21636,7 +23301,7 @@ } } }, - "revision": "20191025", + "revision": "20200311", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -21656,7 +23321,7 @@ "type": "object" }, "AcceleratorType": { - "description": "Represents an Accelerator Type resource.\n\nGoogle Cloud Platform provides graphics processing units (accelerators) that you can add to VM instances to improve or accelerate performance when working with intensive workloads. For more information, read GPUs on Compute Engine. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", + "description": "Represents an Accelerator Type resource.\n\nGoogle Cloud Platform provides graphics processing units (accelerators) that you can add to VM instances to improve or accelerate performance when working with intensive workloads. For more information, read GPUs on Compute Engine. (== resource_for {$api_version}.acceleratorTypes ==)", "id": "AcceleratorType", "properties": { "creationTimestamp": { @@ -22072,7 +23737,7 @@ "type": "object" }, "Address": { - "description": "Represents an IP Address resource.\n\nAn address resource represents a regional internal IP address. Regional internal IP addresses are RFC 1918 addresses that come from either a primary or secondary IP range of a subnet in a VPC network. Regional external IP addresses can be assigned to GCP VM instances, Cloud VPN gateways, regional external forwarding rules for network load balancers (in either Standard or Premium Tier), and regional external forwarding rules for HTTP(S), SSL Proxy, and TCP Proxy load balancers in Standard Tier. For more information, read IP addresses.\n\nA globalAddresses resource represent a global external IP address. Global external IP addresses are IPv4 or IPv6 addresses. They can only be assigned to global forwarding rules for HTTP(S), SSL Proxy, or TCP Proxy load balancers in Premium Tier. For more information, read Global resources. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", + "description": "Use global external addresses for GFE-based external HTTP(S) load balancers in Premium Tier.\n\nUse global internal addresses for reserved peering network range.\n\nUse regional external addresses for the following resources:\n\n- External IP addresses for VM instances - Regional external forwarding rules - Cloud NAT external IP addresses - GFE based LBs in Standard Tier - Network LBs in Premium or Standard Tier - Cloud VPN gateways (both Classic and HA)\n\nUse regional internal IP addresses for subnet IP ranges (primary and secondary). 
This includes:\n\n- Internal IP addresses for VM instances - Alias IP ranges of VM instances (/32 only) - Regional internal forwarding rules - Internal TCP/UDP load balancer addresses - Internal HTTP(S) load balancer addresses - Cloud DNS inbound forwarding IP addresses\n\nFor more information, read reserved IP address.\n\n(== resource_for {$api_version}.addresses ==) (== resource_for {$api_version}.globalAddresses ==)", "id": "Address", "properties": { "address": { @@ -22634,6 +24299,11 @@ "$ref": "CustomerEncryptionKey", "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." }, + "diskSizeGb": { + "description": "The size of the disk in GB.", + "format": "int64", + "type": "string" + }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -22651,7 +24321,7 @@ "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." }, "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. TODO(b/131765817): Update documentation when NVME is supported.", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", "enum": [ "NVME", "SCSI" @@ -22686,6 +24356,10 @@ ], "type": "string" }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "[Output Only] shielded vm initial state stored on disk" + }, "source": { "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. 
When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk.", "type": "string" @@ -22714,11 +24388,11 @@ "type": "string" }, "diskName": { - "description": "Specifies the disk name. If not specified, the default is to use the name of the instance. If the disk with the instance name exists already in the given zone/region, a new name will be automatically generated.", + "description": "Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created.", "type": "string" }, "diskSizeGb": { - "description": "Specifies the size of the disk in base-2 GB. If not specified, the disk will be the same size as the image (usually 10GB). If specified, the size must be equal to or larger than 10GB.", + "description": "Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB.", "format": "int64", "type": "string" }, @@ -22733,6 +24407,20 @@ "description": "Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks.", "type": "object" }, + "onUpdateAction": { + "description": "Specifies which action to take on instance update with this disk. Default is to use the existing disk.", + "enum": [ + "RECREATE_DISK", + "RECREATE_DISK_IF_SOURCE_CHANGED", + "USE_EXISTING_DISK" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "resourcePolicies": { "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", "items": { @@ -22844,7 +24532,7 @@ "type": "object" }, "Autoscaler": { - "description": "Represents an Autoscaler resource.\n\n\n\nUse autoscalers to automatically add or delete instances from a managed instance group according to your defined autoscaling policy. For more information, read Autoscaling Groups of Instances.\n\nFor zonal managed instance groups resource, use the autoscaler resource.\n\nFor regional managed instance groups, use the regionAutoscalers resource. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", + "description": "Represents an Autoscaler resource.\n\nGoogle Compute Engine has two Autoscaler resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/autoscalers) * [Regional](/compute/docs/reference/rest/{$api_version}/regionAutoscalers)\n\nUse autoscalers to automatically add or delete instances from a managed instance group according to your defined autoscaling policy. For more information, read Autoscaling Groups of Instances.\n\nFor zonal managed instance groups resource, use the autoscaler resource.\n\nFor regional managed instance groups, use the regionAutoscalers resource. 
(== resource_for {$api_version}.autoscalers ==) (== resource_for {$api_version}.regionAutoscalers ==)", "id": "Autoscaler", "properties": { "autoscalingPolicy": { @@ -22872,13 +24560,18 @@ "name": { "annotations": { "required": [ - "compute.instanceGroups.insert" + "compute.autoscalers.insert" ] }, "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "recommendedSize": { + "description": "[Output Only] Target recommended MIG size (number of instances) computed by autoscaler. Autoscaler calculates recommended MIG size even when autoscaling policy mode is different from ON. This field is empty when autoscaler is not connected to the existing managed instance group or autoscaler did not generate its prediction.", + "format": "int32", + "type": "integer" + }, "region": { "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope).", "type": "string" @@ -23164,6 +24857,7 @@ "MISSING_CUSTOM_METRIC_DATA_POINTS", "MISSING_LOAD_BALANCING_DATA_POINTS", "MODE_OFF", + "MODE_ONLY_UP", "MORE_THAN_ONE_BACKEND_SERVICE", "NOT_ENOUGH_QUOTA_AVAILABLE", "REGION_RESOURCE_STOCKOUT", @@ -23188,6 +24882,7 @@ "", "", "", + "", "" ], "type": "string" @@ -23322,6 +25017,20 @@ "description": "The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.", "format": "int32", "type": "integer" + }, + "mode": { + "description": "Defines operating mode for this policy.", + "enum": [ + "OFF", + "ON", + "ONLY_UP" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" } }, "type": "object" @@ -23385,7 +25094,7 @@ "id": "Backend", "properties": { "balancingMode": { - "description": "Specifies the balancing mode for the backend.\n\nWhen choosing a balancing mode, you need to consider the loadBalancingScheme, and protocol for the backend service, as well as the type of backend (instance group or NEG).\n\n \n- If the load balancing mode is CONNECTION, then the load is spread based on how many concurrent connections the backend can handle.\nYou can use the CONNECTION balancing mode if the protocol for the backend service is SSL, TCP, or UDP.\n\nIf the loadBalancingScheme for the backend service is EXTERNAL (SSL Proxy and TCP Proxy load balancers), you must also specify exactly one of the following parameters: maxConnections, maxConnectionsPerInstance, or maxConnectionsPerEndpoint.\n\nIf the loadBalancingScheme for the backend service is INTERNAL (internal TCP/UDP load balancers), you cannot specify any additional parameters.\n \n- If the load balancing mode is RATE, the load is spread based on the rate of HTTP requests per second (RPS).\nYou can use the RATE balancing mode if the protocol for the backend service is HTTP or HTTPS. 
You must specify exactly one of the following parameters: maxRate, maxRatePerInstance, or maxRatePerEndpoint.\n \n- If the load balancing mode is UTILIZATION, the load is spread based on the CPU utilization of instances in an instance group.\nYou can use the UTILIZATION balancing mode if the loadBalancingScheme of the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED and the backends are instance groups. There are no restrictions on the backend service protocol.", + "description": "Specifies the balancing mode for the backend.\n\nWhen choosing a balancing mode, you need to consider the loadBalancingScheme, and protocol for the backend service, as well as the type of backend (instance group or NEG).\n\n \n- If the load balancing mode is CONNECTION, then the load is spread based on how many concurrent connections the backend can handle.\nYou can use the CONNECTION balancing mode if the protocol for the backend service is SSL, TCP, or UDP.\n\nIf the loadBalancingScheme for the backend service is EXTERNAL (SSL Proxy and TCP Proxy load balancers), you must also specify exactly one of the following parameters: maxConnections (except for regional managed instance groups), maxConnectionsPerInstance, or maxConnectionsPerEndpoint.\n\nIf the loadBalancingScheme for the backend service is INTERNAL (internal TCP/UDP load balancers), you cannot specify any additional parameters.\n \n- If the load balancing mode is RATE, the load is spread based on the rate of HTTP requests per second (RPS).\nYou can use the RATE balancing mode if the protocol for the backend service is HTTP or HTTPS. You must specify exactly one of the following parameters: maxRate (except for regional managed instance groups), maxRatePerInstance, or maxRatePerEndpoint.\n \n- If the load balancing mode is UTILIZATION, the load is spread based on the backend utilization of instances in an instance group.\nYou can use the UTILIZATION balancing mode if the loadBalancingScheme of the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED and the backends are instance groups. There are no restrictions on the backend service protocol.", "enum": [ "CONNECTION", "RATE", @@ -23407,42 +25116,46 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "failover": { + "description": "This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService.", + "type": "boolean" + }, "group": { "description": "The fully-qualified URL of an instance group or network endpoint group (NEG) resource. The type of backend that a backend service supports depends on the backend service's loadBalancingScheme.\n\n \n- When the loadBalancingScheme for the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, the backend can be either an instance group or a NEG. The backends on the backend service must be either all instance groups or all NEGs. You cannot mix instance group and NEG backends on the same backend service. \n\n\n- When the loadBalancingScheme for the backend service is INTERNAL, the backend must be an instance group in the same region as the backend service. NEGs are not supported. \n\nYou must use the fully-qualified URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. 
Partial URLs are not supported.", "type": "string" }, "maxConnections": { - "description": "Defines a maximum target for simultaneous connections for the entire backend (instance group or NEG). If the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is CONNECTION, and backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnectionsPerInstance, or maxConnectionsPerEndpoint.\n\nNot available if the backend's balancingMode is RATE. If the loadBalancingScheme is INTERNAL, then maxConnections is not supported, even though the backend requires a balancing mode of CONNECTION.", + "description": "Defines a target maximum number of simultaneous connections that the backend can handle. Valid for network endpoint group and instance group backends (except for regional managed instance groups). If the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is CONNECTION, and backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnectionsPerInstance, or maxConnectionsPerEndpoint.\n\nNot available if the backend's balancingMode is RATE. If the loadBalancingScheme is INTERNAL, then maxConnections is not supported, even though the backend requires a balancing mode of CONNECTION.", "format": "int32", "type": "integer" }, "maxConnectionsPerEndpoint": { - "description": "Defines a maximum target for simultaneous connections for an endpoint of a NEG. This is multiplied by the number of endpoints in the NEG to implicitly calculate a maximum number of target maximum simultaneous connections for the NEG. If the backend's balancingMode is CONNECTION, and the backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnections, or maxConnectionsPerInstance.\n\nNot available if the backend's balancingMode is RATE. Internal TCP/UDP load balancing does not support setting maxConnectionsPerEndpoint even though its backends require a balancing mode of CONNECTION.", + "description": "Defines a target maximum number of simultaneous connections for an endpoint of a NEG. This is multiplied by the number of endpoints in the NEG to implicitly calculate a maximum number of target maximum simultaneous connections for the NEG. If the backend's balancingMode is CONNECTION, and the backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnections, or maxConnectionsPerInstance.\n\nNot available if the backend's balancingMode is RATE. Internal TCP/UDP load balancing does not support setting maxConnectionsPerEndpoint even though its backends require a balancing mode of CONNECTION.", "format": "int32", "type": "integer" }, "maxConnectionsPerInstance": { - "description": "Defines a maximum target for simultaneous connections for a single VM in a backend instance group. This is multiplied by the number of instances in the instance group to implicitly calculate a target maximum number of simultaneous connections for the whole instance group. If the backend's balancingMode is UTILIZATION, this is an optional parameter. 
If the backend's balancingMode is CONNECTION, and backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnections, or maxConnectionsPerEndpoint.\n\nNot available if the backend's balancingMode is RATE. Internal TCP/UDP load balancing does not support setting maxConnectionsPerInstance even though its backends require a balancing mode of CONNECTION.", + "description": "Defines a target maximum number of simultaneous connections for a single VM in a backend instance group. This is multiplied by the number of instances in the instance group to implicitly calculate a target maximum number of simultaneous connections for the whole instance group. If the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is CONNECTION, and backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnections, or maxConnectionsPerEndpoint.\n\nNot available if the backend's balancingMode is RATE. Internal TCP/UDP load balancing does not support setting maxConnectionsPerInstance even though its backends require a balancing mode of CONNECTION.", "format": "int32", "type": "integer" }, "maxRate": { - "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "description": "Defines a maximum number of HTTP requests per second (RPS) that the backend can handle. Valid for network endpoint group and instance group backends (except for regional managed instance groups). Must not be defined if the backend is a managed instance group that uses autoscaling based on load balancing.\n\nIf the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is RATE, you must specify maxRate, maxRatePerInstance, or maxRatePerEndpoint.\n\nNot available if the backend's balancingMode is CONNECTION.", "format": "int32", "type": "integer" }, "maxRatePerEndpoint": { - "description": "Defines a maximum target for requests per second (RPS) for an endpoint of a NEG. This is multiplied by the number of endpoints in the NEG to implicitly calculate a target maximum rate for the NEG.\n\nIf the backend's balancingMode is RATE, you must specify either this parameter, maxRate, or maxRatePerInstance.\n\nNot available if the backend's balancingMode is CONNECTION.", + "description": "Defines a maximum target for requests per second (RPS) for an endpoint of a NEG. This is multiplied by the number of endpoints in the NEG to implicitly calculate a target maximum rate for the NEG.\n\nIf the backend's balancingMode is RATE, you must specify either this parameter, maxRate (except for regional managed instance groups), or maxRatePerInstance.\n\nNot available if the backend's balancingMode is CONNECTION.", "format": "float", "type": "number" }, "maxRatePerInstance": { - "description": "Defines a maximum target for requests per second (RPS) for a single VM in a backend instance group. This is multiplied by the number of instances in the instance group to implicitly calculate a target maximum rate for the whole instance group.\n\nIf the backend's balancingMode is UTILIZATION, this is an optional parameter. 
If the backend's balancingMode is RATE, you must specify either this parameter, maxRate, or maxRatePerEndpoint.\n\nNot available if the backend's balancingMode is CONNECTION.", + "description": "Defines a maximum target for requests per second (RPS) for a single VM in a backend instance group. This is multiplied by the number of instances in the instance group to implicitly calculate a target maximum rate for the whole instance group.\n\nIf the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is RATE, you must specify either this parameter, maxRate (except for regional managed instance groups), or maxRatePerEndpoint.\n\nNot available if the backend's balancingMode is CONNECTION.", "format": "float", "type": "number" }, "maxUtilization": { - "description": "Defines the maximum average CPU utilization of a backend VM in an instance group. The valid range is [0.0, 1.0]. This is an optional parameter if the backend's balancingMode is UTILIZATION.\n\nThis parameter can be used in conjunction with maxRate, maxRatePerInstance, maxConnections, or maxConnectionsPerInstance.", + "description": "Defines the maximum average backend utilization of a backend VM in an instance group. The valid range is [0.0, 1.0]. This is an optional parameter if the backend's balancingMode is UTILIZATION.\n\nThis parameter can be used in conjunction with maxRate, maxRatePerInstance, maxConnections (except for regional managed instance groups), or maxConnectionsPerInstance.", "format": "float", "type": "number" } @@ -23627,7 +25340,7 @@ "type": "object" }, "BackendService": { - "description": "Represents a Backend Service resource.\n\nA backend service contains configuration values for Google Cloud Platform load balancing services.\n\nFor more information, read Backend Services.\n\n(== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", + "description": "Represents a Backend Service resource.\n\nA backend service contains configuration values for Google Cloud Platform load balancing services.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, read Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", "id": "BackendService", "properties": { "affinityCookieTtlSec": { @@ -23676,13 +25389,17 @@ "description": "If true, enables Cloud CDN for the backend service. Only applicable if the loadBalancingScheme is EXTERNAL and the protocol is HTTP or HTTPS.", "type": "boolean" }, + "failoverPolicy": { + "$ref": "BackendServiceFailoverPolicy", + "description": "Applicable only to Failover for Internal TCP/UDP Load Balancing. Requires at least one backend instance group to be defined as a backup (failover) backend." + }, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. 
An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a BackendService.", "format": "byte", "type": "string" }, "healthChecks": { - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for Compute Engine backend services. A health check must not be specified for App Engine backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", + "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently at most one health check can be specified. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet NEG backends must not have a health check. A health check must", "items": { "type": "string" }, @@ -23720,7 +25437,7 @@ "type": "string" }, "localityLbPolicy": { - "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.", + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. 
\n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nIf sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -23741,11 +25458,19 @@ ], "type": "string" }, + "logConfig": { + "$ref": "BackendServiceLogConfig", + "description": "This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver." + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "network": { + "description": "The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL.", + "type": "string" + }, "outlierDetection": { "$ref": "OutlierDetection", "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." @@ -23756,11 +25481,11 @@ "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. Required when the loadBalancingScheme is EXTERNAL and the backends are instance groups. The named port must be defined on each backend instance group. This parameter has no meaning if the backends are NEGs.\n\n\n\nMust be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Blaancing).", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. Required when the loadBalancingScheme is EXTERNAL, INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED and the backends are instance groups. The named port must be defined on each backend instance group. 
This parameter has no meaning if the backends are NEGs.\n\n\n\nMust be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).", "type": "string" }, "protocol": { - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP, depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP. depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.", "enum": [ "HTTP", "HTTP2", @@ -23792,7 +25517,7 @@ "type": "string" }, "sessionAffinity": { - "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.", + "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.", "enum": [ "CLIENT_IP", "CLIENT_IP_PORT_PROTO", @@ -23957,6 +25682,26 @@ }, "type": "object" }, + "BackendServiceFailoverPolicy": { + "description": "Applicable only to Failover for Internal TCP/UDP Load Balancing. On failover or failback, this field indicates whether connection draining will be honored. GCP has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "id": "BackendServiceFailoverPolicy", + "properties": { + "disableConnectionDrainOnFailover": { + "description": "This can be set to true only if the protocol is TCP.\n\nThe default is false.", + "type": "boolean" + }, + "dropTrafficIfUnhealthy": { + "description": "Applicable only to Failover for Internal TCP/UDP Load Balancing. If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy. If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy.\n\nThe default is false.", + "type": "boolean" + }, + "failoverRatio": { + "description": "Applicable only to Failover for Internal TCP/UDP Load Balancing. The value of the field must be in the range [0, 1]. 
If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, "BackendServiceGroupHealth": { "id": "BackendServiceGroupHealth", "properties": { @@ -24107,6 +25852,22 @@ }, "type": "object" }, + "BackendServiceLogConfig": { + "description": "The available logging options for the load balancer traffic served by this backend service.", + "id": "BackendServiceLogConfig", + "properties": { + "enable": { + "description": "This field denotes whether to enable logging for the load balancer traffic served by this backend service.", + "type": "boolean" + }, + "sampleRate": { + "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, "BackendServiceReference": { "id": "BackendServiceReference", "properties": { @@ -24219,7 +25980,7 @@ "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. 
If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.", "items": { "type": "string" }, @@ -24311,7 +26072,7 @@ "type": "object" }, "Commitment": { - "description": "Represents a regional Commitment resource.\n\nCreating a commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts. (== resource_for beta.regionCommitments ==) (== resource_for v1.regionCommitments ==)", + "description": "Represents a regional Commitment resource.\n\nCreating a commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts. (== resource_for {$api_version}.regionCommitments ==)", "id": "Commitment", "properties": { "creationTimestamp": { @@ -24899,7 +26660,7 @@ "type": "array" }, "maxAge": { - "description": "Specifies how long the results of a preflight request can be cached. This translates to the content for the Access-Control-Max-Age header.", + "description": "Specifies how long results of a preflight request can be cached in seconds. This translates to the Access-Control-Max-Age header.", "format": "int32", "type": "integer" } @@ -24914,6 +26675,10 @@ "description": "The name of the encryption key that is stored in Google Cloud KMS.", "type": "string" }, + "kmsKeyServiceAccount": { + "description": "The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used.", + "type": "string" + }, "rawKey": { "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource.", "type": "string" @@ -24979,7 +26744,7 @@ "type": "object" }, "Disk": { - "description": "Represents a Persistent Disk resource.\n\nPersistent disks are required for running your VM instances. Create both boot and non-boot (data) persistent disks. For more information, read Persistent Disks. For more storage options, read Storage options.\n\nThe disks resource represents a zonal persistent disk. For more information, read Zonal persistent disks.\n\nThe regionDisks resource represents a regional persistent disk. For more information, read Regional resources. 
(== resource_for beta.disks ==) (== resource_for v1.disks ==) (== resource_for v1.regionDisks ==) (== resource_for beta.regionDisks ==)", + "description": "Represents a Persistent Disk resource.\n\nGoogle Compute Engine has two Disk resources:\n\n* [Zonal](/compute/docs/reference/rest/{$api_version}/disks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionDisks)\n\nPersistent disks are required for running your VM instances. Create both boot and non-boot (data) persistent disks. For more information, read Persistent Disks. For more storage options, read Storage options.\n\nThe disks resource represents a zonal persistent disk. For more information, read Zonal persistent disks.\n\nThe regionDisks resource represents a regional persistent disk. For more information, read Regional resources. (== resource_for {$api_version}.disks ==) (== resource_for {$api_version}.regionDisks ==)", "id": "Disk", "properties": { "creationTimestamp": { @@ -25432,7 +27197,7 @@ "type": "object" }, "DiskType": { - "description": "Represents a Disk Type resource.\n\nYou can choose from a variety of disk types based on your needs. For more information, read Storage options.\n\nThe diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks.\n\nThe regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks. (== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==) (== resource_for v1.regionDiskTypes ==) (== resource_for beta.regionDiskTypes ==)", + "description": "Represents a Disk Type resource.\n\nGoogle Compute Engine has two Disk Type resources:\n\n* [Regional](/compute/docs/reference/rest/{$api_version}/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/{$api_version}/diskTypes)\n\nYou can choose from a variety of disk types based on your needs. For more information, read Storage options.\n\nThe diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks.\n\nThe regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks. (== resource_for {$api_version}.diskTypes ==) (== resource_for {$api_version}.regionDiskTypes ==)", "id": "DiskType", "properties": { "creationTimestamp": { @@ -25991,31 +27756,179 @@ }, "type": "object" }, + "ExchangedPeeringRoute": { + "id": "ExchangedPeeringRoute", + "properties": { + "destRange": { + "description": "The destination range of the route.", + "type": "string" + }, + "imported": { + "description": "True if the peering route has been imported from a peer. 
The actual import happens if the field networkPeering.importCustomRoutes is true for this network, and networkPeering.exportCustomRoutes is true for the peer network, and the import does not result in a route conflict.", + "type": "boolean" + }, + "nextHopRegion": { + "description": "The region of peering route next hop, only applies to dynamic routes.", + "type": "string" + }, + "priority": { + "description": "The priority of the peering route.", + "format": "uint32", + "type": "integer" + }, + "type": { + "description": "The type of the peering route.", + "enum": [ + "DYNAMIC_PEERING_ROUTE", + "STATIC_PEERING_ROUTE", + "SUBNET_PEERING_ROUTE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ExchangedPeeringRoutesList": { + "id": "ExchangedPeeringRoutesList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of ExchangedPeeringRoute resources.", + "items": { + "$ref": "ExchangedPeeringRoute" + }, + "type": "array" + }, + "kind": { + "default": "compute#exchangedPeeringRoutesList", + "description": "[Output Only] Type of resource. Always compute#exchangedPeeringRoutesList for exchanged peering routes lists.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "Expr": { - "description": "Represents an expression text. Example:\n\ntitle: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.\n\nExample (Comparison):\n\ntitle: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\"\n\nExample (Equality):\n\ntitle: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\"\n\nExample (Logic):\n\ntitle: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\"\n\nExample (Data Manipulation):\n\ntitle: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\"\n\nThe exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", "properties": { "description": { - "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax.\n\nThe application context of the containing message determines which well-known feature set of CEL is supported.", + "description": "Textual representation of an expression in Common Expression Language syntax.", "type": "string" }, "location": { - "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" }, "title": { - "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", "type": "string" } }, "type": "object" }, "ExternalVpnGateway": { - "description": "External VPN gateway is the on-premises VPN gateway(s) or another cloud provider?s VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud to your on-premises side or another Cloud provider's VPN gateway, you must create a external VPN gateway resource in GCP, which provides the information to GCP about your external VPN gateway.", + "description": "External VPN gateway is the on-premises VPN gateway(s) or another cloud provider's VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud to your on-premises side or another Cloud provider's VPN gateway, you must create a external VPN gateway resource in GCP, which provides the information to GCP about your external VPN gateway.", "id": "ExternalVpnGateway", "properties": { "creationTimestamp": { @@ -26096,7 +28009,7 @@ "type": "integer" }, "ipAddress": { - "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider?s VPN gateway, it cannot be an IP address from Google Compute Engine.", + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.", "type": "string" } }, @@ -26217,6 +28130,30 @@ }, "type": "object" }, + "FileContentBuffer": { + "id": "FileContentBuffer", + "properties": { + "content": { + "description": "The raw content in the secure keys file.", + "format": "byte", + "type": "string" + }, + "fileType": { + "enum": [ + "BIN", + "UNDEFINED", + "X509" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "Firewall": { "description": "Represents a Firewall Rule resource.\n\nFirewall rules allow or deny ingress traffic to, and egress traffic from your instances. For more information, read Firewall rules.", "id": "Firewall", @@ -26514,7 +28451,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource.\n\nA forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway).\n\nFor more information, read Forwarding rule concepts and Using protocol forwarding.\n\n(== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", + "description": "Represents a Forwarding Rule resource.\n\nForwarding rule resources in GCP can be either regional or global in scope:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/globalForwardingRules) * [Regional](/compute/docs/reference/rest/{$api_version}/forwardingRules)\n\nA forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. 
Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway).\n\nFor more information, read Forwarding rule concepts and Using protocol forwarding.\n\n(== resource_for {$api_version}.forwardingRules ==) (== resource_for {$api_version}.globalForwardingRules ==) (== resource_for {$api_version}.regionForwardingRules ==)", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -26522,7 +28459,7 @@ "type": "string" }, "IPProtocol": { - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nFor Internal TCP/UDP Load Balancing, the load balancing scheme is INTERNAL, and one of TCP or UDP are valid. For Traffic Director, the load balancing scheme is INTERNAL_SELF_MANAGED, and only TCPis valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is INTERNAL_MANAGED, and only TCP is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is EXTERNAL and only TCP is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is EXTERNAL, and one of TCP or UDP is valid.", + "description": "The IP protocol to which this rule applies. For protocol forwarding, valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nFor Internal TCP/UDP Load Balancing, the load balancing scheme is INTERNAL, and one of TCP or UDP are valid. For Traffic Director, the load balancing scheme is INTERNAL_SELF_MANAGED, and only TCPis valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is INTERNAL_MANAGED, and only TCP is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is EXTERNAL and only TCP is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is EXTERNAL, and one of TCP or UDP is valid.", "enum": [ "AH", "ESP", @@ -26545,6 +28482,10 @@ "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. This field cannot be used with port or portRange fields.\n\nWhen the load balancing scheme is INTERNAL and protocol is TCP/UDP, specify this field to allow packets addressed to any ports will be forwarded to the backends configured with this forwarding rule.", "type": "boolean" }, + "allowGlobalAccess": { + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + "type": "boolean" + }, "backendService": { "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", "type": "string" @@ -26557,6 +28498,11 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a ForwardingRule. 
Include the fingerprint in patch request to ensure that you do not overwrite changes that were applied from another concurrent request.\n\nTo see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "format": "byte", + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -26576,13 +28522,17 @@ ], "type": "string" }, + "isMirroringCollector": { + "description": "Indicates whether or not this load balancer can be used as a collector for packet mirroring. To prevent mirroring loops, instances behind this load balancer will not have their traffic mirrored even if a PacketMirroring rule applies to them. This can only be set to true for load balancers that have their loadBalancingScheme set to INTERNAL.", + "type": "boolean" + }, "kind": { "default": "compute#forwardingRule", "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", "type": "string" }, "loadBalancingScheme": { - "description": "Specifies the forwarding rule type. EXTERNAL is used for: - Classic Cloud VPN gateways - Protocol forwarding to VMs from an external IP address - The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP.\n\nINTERNAL is used for: - Protocol forwarding to VMs from an internal IP address - Internal TCP/UDP load balancers\n\nINTERNAL_MANAGED is used for: - Internal HTTP(S) load balancers\n\nINTERNAL_SELF_MANAGED is used for: - Traffic Director\n\nFor more information about forwarding rules, refer to Forwarding rule concepts.", + "description": "Specifies the forwarding rule type.\n\n \n- EXTERNAL is used for: \n- Classic Cloud VPN gateways \n- Protocol forwarding to VMs from an external IP address \n- The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP \n- INTERNAL is used for: \n- Protocol forwarding to VMs from an internal IP address \n- Internal TCP/UDP load balancers \n- INTERNAL_MANAGED is used for: \n- Internal HTTP(S) load balancers \n- INTERNAL_SELF_MANAGED is used for: \n- Traffic Director \n\nFor more information about forwarding rules, refer to Forwarding rule concepts.", "enum": [ "EXTERNAL", "INTERNAL", @@ -26600,7 +28550,7 @@ "type": "string" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overridden by those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. 
TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be visible to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nmetadataFilters specified here will be applifed before those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "items": { "$ref": "MetadataFilter" }, @@ -26616,7 +28566,7 @@ "type": "string" }, "networkTier": { - "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM , STANDARD.\n\nFor regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, the valid value is PREMIUM.\n\nIf this field is not specified, it is assumed to be PREMIUM. If IPAddress is specified, this value must be equal to the networkTier of the Address.", + "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM, STANDARD.\n\nFor regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, the valid value is PREMIUM.\n\nIf this field is not specified, it is assumed to be PREMIUM. If IPAddress is specified, this value must be equal to the networkTier of the Address.", "enum": [ "PREMIUM", "STANDARD" @@ -26628,11 +28578,11 @@ "type": "string" }, "portRange": { - "description": "This field is deprecated. See the port\nfield.", + "description": "When the load balancing scheme is EXTERNAL, INTERNAL_SELF_MANAGED and INTERNAL_MANAGED, you can specify a port_range. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", "type": "string" }, "ports": { - "description": "List of comma-separated ports. The forwarding rule forwards packets with matching destination ports. If the forwarding rule's loadBalancingScheme is EXTERNAL, and the forwarding rule references a target pool, specifying ports is optional. You can specify an unlimited number of ports, but they must be contiguous. 
If you omit ports, GCP forwards traffic on any port of the forwarding rule's protocol.\n\nIf the forwarding rule's loadBalancingScheme is EXTERNAL, and the forwarding rule references a target HTTP proxy, target HTTPS proxy, target TCP proxy, target SSL proxy, or target VPN gateway, you must specify ports using the following constraints:\n\n \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500 \n\nIf the forwarding rule's loadBalancingScheme is INTERNAL, you must specify ports in one of the following ways:\n\n* A list of up to five ports, which can be non-contiguous * Keyword ALL, which causes the forwarding rule to forward traffic on any port of the forwarding rule's protocol.\n\nThe ports field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.", + "description": "This field is used along with the backend_service field for internal load balancing.\n\nWhen the load balancing scheme is INTERNAL, a list of ports can be configured, for example, ['80'], ['8000','9000']. Only packets addressed to these ports are forwarded to the backends configured with the forwarding rule.\n\nIf the forwarding rule's loadBalancingScheme is INTERNAL, you can specify ports in one of the following ways:\n\n* A list of up to five ports, which can be non-contiguous * Keyword ALL, which causes the forwarding rule to forward traffic on any port of the forwarding rule's protocol.", "items": { "type": "string" }, @@ -26660,7 +28610,7 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid.", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. 
For INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy is valid, not targetHttpsProxy.", "type": "string" } }, @@ -26993,6 +28943,32 @@ }, "type": "object" }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "GlobalNetworkEndpointGroupsDetachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsDetachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, "GlobalSetLabelsRequest": { "id": "GlobalSetLabelsRequest", "properties": { @@ -27043,7 +29019,7 @@ "type": "string" }, "queryPath": { - "description": "The path to be queried. This can be the default namespace ('/') or a nested namespace ('//') or a specified key ('//')", + "description": "The path to be queried. This can be the default namespace ('/') or a nested namespace ('/\\/') or a specified key ('/\\/\\')", "type": "string" }, "queryValue": { @@ -27141,7 +29117,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP2 health check follows behavior specified in\nport\nand\nportName\nfields.", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP2 health check follows behavior specified in port and portName fields.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -27194,7 +29170,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP health check follows behavior specified in\nport\nand\nportName\nfields.", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP health check follows behavior specified in port and portName fields.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -27247,7 +29223,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTPS health check follows behavior specified in\nport\nand\nportName\nfields.", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTPS health check follows behavior specified in port and portName fields.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -27284,7 +29260,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource.\n\nHealth checks are used for most GCP load balancers and managed instance group auto-healing. For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", + "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers use regional health checks. All other types of GCP load balancers and managed instance group auto-healing use global health checks. For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -27724,11 +29700,11 @@ "type": "string" }, "ipAddress": { - "description": "The IP address represented by this resource.", + "description": "A forwarding rule IP address assigned to this instance.", "type": "string" }, "port": { - "description": "The port on the instance.", + "description": "The named port of the instance group, not necessarily the port that is health-checked.", "format": "int32", "type": "integer" } @@ -27778,7 +29754,7 @@ "type": "string" }, "hosts": { - "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", + "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). 
In that case, * must be the first character and must be followed in the pattern by either - or ..", "items": { "type": "string" }, @@ -27895,15 +29871,15 @@ "type": "string" }, "presentMatch": { - "description": "A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value or not.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "description": "A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", "type": "boolean" }, "rangeMatch": { "$ref": "Int64RangeMatch", - "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails.\nFor example for a range [-5, 0] \n- -3 will match. \n- 0 will not match. \n- 0.25 will not match. \n- -3someString will not match. \nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set." + "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails.\nFor example for a range [-5, 0] \n- -3 will match. \n- 0 will not match. \n- 0.25 will not match. \n- -3someString will not match. \nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.\nNote that rangeMatch is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL." }, "regexMatch": { - "description": "The value of the header must match the regualar expression specified in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript \nFor matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "description": "The value of the header must match the regular expression specified in regexMatch. 
For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript \nFor matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.\nNote that regexMatch only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "type": "string" }, "suffixMatch": { @@ -28116,7 +30092,7 @@ "id": "HttpQueryParameterMatch", "properties": { "exactMatch": { - "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch.\nOnly one of presentMatch, exactMatch or regexMatch must be set.", "type": "string" }, "name": { @@ -28124,11 +30100,11 @@ "type": "string" }, "presentMatch": { - "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not.\nOnly one of presentMatch, exactMatch or regexMatch must be set.", "type": "boolean" }, "regexMatch": { - "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of presentMatch, exactMatch or regexMatch must be set.\nNote that regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" } }, @@ -28147,11 +30123,11 @@ "type": "boolean" }, "pathRedirect": { - "description": "The path that will be used in the redirect response instead of the one that was supplied in the request.\nOnly one of pathRedirect or prefixRedirect must be specified.\nThe value must be between 1 and 1024 characters.", + "description": "The path that will be used in the redirect response instead of the one that was supplied in the request.\npathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect.\nThe value must be between 1 and 1024 characters.", "type": "string" }, "prefixRedirect": { - "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request.", + "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request.\nprefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. 
If neither is supplied, the path of the original request will be used for the redirect.\nThe value must be between 1 and 1024 characters.", "type": "string" }, "redirectResponseCode": { @@ -28227,7 +30203,7 @@ }, "urlRewrite": { "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service" + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service." }, "weightedBackendServices": { "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", @@ -28264,7 +30240,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -28282,7 +30258,7 @@ "id": "HttpRouteRuleMatch", "properties": { "fullPathMatch": { - "description": "For satifying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL.\nFullPathMatch must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "description": "For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL.\nfullPathMatch must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" }, "headerMatches": { @@ -28293,18 +30269,18 @@ "type": "array" }, "ignoreCase": { - "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\ncaseSensitive must not be used with regexMatch.", + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.", "type": "boolean" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overrides those specified in ForwardingRule that refers to this UrlMap.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "items": { "$ref": "MetadataFilter" }, "type": "array" }, "prefixMatch": { - "description": "For satifying the matchRule condition, the request's path must begin with the specified prefixMatch. 
prefixMatch must begin with a /.\nThe value must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /.\nThe value must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" }, "queryParameterMatches": { @@ -28315,7 +30291,7 @@ "type": "array" }, "regexMatch": { - "description": "For satifying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.\nNote that regexMatch only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "type": "string" } }, @@ -28501,7 +30477,7 @@ "type": "object" }, "Image": { - "description": "Represents an Image resource.\n\nYou can use images to create boot disks for your VM instances. For more information, read Images. (== resource_for beta.images ==) (== resource_for v1.images ==)", + "description": "Represents an Image resource.\n\nYou can use images to create boot disks for your VM instances. For more information, read Images. (== resource_for {$api_version}.images ==)", "id": "Image", "properties": { "archiveSizeBytes": { @@ -28622,6 +30598,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "Set the secure boot keys of shielded instance." + }, "sourceDisk": { "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", "type": "string" @@ -28807,8 +30787,40 @@ }, "type": "object" }, + "InitialStateConfig": { + "description": "Initial State for shielded instance, these are public keys which are safe to store in public", + "id": "InitialStateConfig", + "properties": { + "dbs": { + "description": "The Key Database (db).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" + }, + "dbxs": { + "description": "The forbidden key database (dbx).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" + }, + "keks": { + "description": "The Key Exchange Key (KEK).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" + }, + "pk": { + "$ref": "FileContentBuffer", + "description": "The Platform Key (PK)." + } + }, + "type": "object" + }, "Instance": { - "description": "Represents an Instance resource.\n\nAn instance is a virtual machine that is hosted on Google Cloud Platform. 
For more information, read Virtual Machine Instances. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", + "description": "Represents an Instance resource.\n\nAn instance is a virtual machine that is hosted on Google Cloud Platform. For more information, read Virtual Machine Instances. (== resource_for {$api_version}.instances ==)", "id": "Instance", "properties": { "canIpForward": { @@ -28842,6 +30854,11 @@ "$ref": "DisplayDevice", "description": "Enables display device for the instance." }, + "fingerprint": { + "description": "Specifies a fingerprint for this resource, which is essentially a hash of the instance's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update the instance. You must always provide an up-to-date fingerprint hash in order to update the instance.\n\nTo see the latest fingerprint, make get() request to the instance.", + "format": "byte", + "type": "string" + }, "guestAccelerators": { "description": "A list of the type and count of accelerator cards attached to the instance.", "items": { @@ -28913,6 +30930,13 @@ "$ref": "ReservationAffinity", "description": "Specifies the reservations that this instance can consume from." }, + "resourcePolicies": { + "description": "Resource policies applied to this instance.", + "items": { + "type": "string" + }, + "type": "array" + }, "scheduling": { "$ref": "Scheduling", "description": "Sets the scheduling options for this instance." @@ -28941,6 +30965,7 @@ "status": { "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", "enum": [ + "DEPROVISIONING", "PROVISIONING", "REPAIRING", "RUNNING", @@ -28960,6 +30985,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29092,7 +31118,7 @@ "type": "object" }, "InstanceGroup": { - "description": "Represents an unmanaged Instance Group resource.\n\nUse unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. For more information, read Instance groups.\n\nFor zonal unmanaged Instance Group, use instanceGroups resource.\n\nFor regional unmanaged Instance Group, use regionInstanceGroups resource. (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", + "description": "Represents an Instance Group resource.\n\nInstance Groups can be used to configure a target for load balancing.\n\nInstance groups can either be managed or unmanaged.\n\nTo create managed instance groups, use the instanceGroupManager or regionInstanceGroupManager resource instead.\n\nUse zonal unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. You cannot create regional unmanaged instance groups.\n\nFor more information, read Instance groups.\n\n(== resource_for {$api_version}.instanceGroups ==) (== resource_for {$api_version}.regionInstanceGroups ==)", "id": "InstanceGroup", "properties": { "creationTimestamp": { @@ -29121,7 +31147,7 @@ "name": { "annotations": { "required": [ - "compute.instanceGroupManagers.insert" + "compute.instanceGroups.insert" ] }, "description": "The name of the instance group. 
The name must be 1-63 characters long, and comply with RFC1035.", @@ -29388,7 +31414,7 @@ "type": "object" }, "InstanceGroupManager": { - "description": "Represents a Managed Instance Group resource.\n\nAn instance group is a collection of VM instances that you can manage as a single entity. For more information, read Instance groups.\n\nFor zonal Managed Instance Group, use the instanceGroupManagers resource.\n\nFor regional Managed Instance Group, use the regionInstanceGroupManagers resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", + "description": "Represents a Managed Instance Group resource.\n\nAn instance group is a collection of VM instances that you can manage as a single entity. For more information, read Instance groups.\n\nFor zonal Managed Instance Group, use the instanceGroupManagers resource.\n\nFor regional Managed Instance Group, use the regionInstanceGroupManagers resource. (== resource_for {$api_version}.instanceGroupManagers ==) (== resource_for {$api_version}.regionInstanceGroupManagers ==)", "id": "InstanceGroupManager", "properties": { "autoHealingPolicies": { @@ -29491,7 +31517,7 @@ "compute.regionInstanceGroupManagers.insert" ] }, - "description": "The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number.", + "description": "The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number.", "format": "int32", "type": "integer" }, @@ -29807,9 +31833,27 @@ "InstanceGroupManagerStatus": { "id": "InstanceGroupManagerStatus", "properties": { + "autoscaler": { + "description": "[Output Only] The URL of the Autoscaler that targets this instance group manager.", + "type": "string" + }, "isStable": { "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", "type": "boolean" + }, + "versionTarget": { + "$ref": "InstanceGroupManagerStatusVersionTarget", + "description": "[Output Only] A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager." + } + }, + "type": "object" + }, + "InstanceGroupManagerStatusVersionTarget": { + "id": "InstanceGroupManagerStatusVersionTarget", + "properties": { + "isReached": { + "description": "[Output Only] A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.", + "type": "boolean" } }, "type": "object" @@ -29817,6 +31861,18 @@ "InstanceGroupManagerUpdatePolicy": { "id": "InstanceGroupManagerUpdatePolicy", "properties": { + "instanceRedistributionType": { + "description": "The instance redistribution policy for regional managed instance groups. 
Valid values are: \n- PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. \n- NONE: For non-autoscaled groups, proactive redistribution is disabled.", + "enum": [ + "NONE", + "PROACTIVE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "maxSurge": { "$ref": "FixedOrPercent", "description": "The maximum number of instances that can be created above the specified targetSize during the update process. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." @@ -29828,9 +31884,25 @@ "minimalAction": { "description": "Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", "enum": [ + "NONE", + "REFRESH", "REPLACE", "RESTART" ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "replacementMethod": { + "description": "What action should be used to replace instances. See minimal_action.REPLACE", + "enum": [ + "RECREATE", + "SUBSTITUTE" + ], "enumDescriptions": [ "", "" @@ -29883,6 +31955,66 @@ }, "type": "object" }, + "InstanceGroupManagersApplyUpdatesRequest": { + "description": "InstanceGroupManagers.applyUpdatesToInstances", + "id": "InstanceGroupManagersApplyUpdatesRequest", + "properties": { + "instances": { + "description": "The list of URLs of one or more instances for which you want to apply updates. Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + }, + "type": "array" + }, + "minimalAction": { + "description": "The minimal action that you want to perform on each instance during the update: \n- REPLACE: At minimum, delete the instance and create it again. \n- RESTART: Stop the instance and start it again. \n- REFRESH: Do not stop the instance. \n- NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that you want to perform on each instance during the update: \n- REPLACE: Delete the instance and create it again. \n- RESTART: Stop the instance and start it again. \n- REFRESH: Do not stop the instance. \n- NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. 
If your update requires a more disruptive action than you set with this flag, the update request will fail.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagersCreateInstancesRequest": { + "description": "InstanceGroupManagers.createInstances", + "id": "InstanceGroupManagersCreateInstancesRequest", + "properties": { + "instances": { + "description": "[Required] List of specifications of per-instance configs.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, "InstanceGroupManagersDeleteInstancesRequest": { "id": "InstanceGroupManagersDeleteInstancesRequest", "properties": { @@ -29896,6 +32028,23 @@ }, "type": "object" }, + "InstanceGroupManagersListErrorsResponse": { + "id": "InstanceGroupManagersListErrorsResponse", + "properties": { + "items": { + "description": "[Output Only] The list of errors of the managed instance group.", + "items": { + "$ref": "InstanceManagedByIgmError" + }, + "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + } + }, + "type": "object" + }, "InstanceGroupManagersListManagedInstancesResponse": { "id": "InstanceGroupManagersListManagedInstancesResponse", "properties": { @@ -29905,6 +32054,10 @@ "$ref": "ManagedInstance" }, "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" } }, "type": "object" @@ -30535,6 +32688,78 @@ }, "type": "object" }, + "InstanceManagedByIgmError": { + "id": "InstanceManagedByIgmError", + "properties": { + "error": { + "$ref": "InstanceManagedByIgmErrorManagedInstanceError", + "description": "[Output Only] Contents of the error." + }, + "instanceActionDetails": { + "$ref": "InstanceManagedByIgmErrorInstanceActionDetails", + "description": "[Output Only] Details of the instance action that triggered this error. May be null, if the error was not caused by an action on an instance. This field is optional." + }, + "timestamp": { + "description": "[Output Only] The time that this error occurred. This value is in RFC3339 text format.", + "type": "string" + } + }, + "type": "object" + }, + "InstanceManagedByIgmErrorInstanceActionDetails": { + "id": "InstanceManagedByIgmErrorInstanceActionDetails", + "properties": { + "action": { + "description": "[Output Only] Action that managed instance group was executing on the instance when the error occurred. Possible values:", + "enum": [ + "ABANDONING", + "CREATING", + "CREATING_WITHOUT_RETRIES", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING", + "VERIFYING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "instance": { + "description": "[Output Only] The URL of the instance. 
The URL can be set even if the instance has not yet been created.", + "type": "string" + }, + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Version this instance was created from, or was being created from, but the creation failed. Corresponds to one of the versions that were set on the Instance Group Manager resource at the time this instance was being created." + } + }, + "type": "object" + }, + "InstanceManagedByIgmErrorManagedInstanceError": { + "id": "InstanceManagedByIgmErrorManagedInstanceError", + "properties": { + "code": { + "description": "[Output Only] Error code.", + "type": "string" + }, + "message": { + "description": "[Output Only] Error message.", + "type": "string" + } + }, + "type": "object" + }, "InstanceMoveRequest": { "id": "InstanceMoveRequest", "properties": { @@ -30610,6 +32835,13 @@ "$ref": "ReservationAffinity", "description": "Specifies the reservations that this instance can consume from." }, + "resourcePolicies": { + "description": "Resource policies (names, not ULRs) applied to instances created from this template.", + "items": { + "type": "string" + }, + "type": "array" + }, "scheduling": { "$ref": "Scheduling", "description": "Specifies the scheduling options for the instances that are created from this template." @@ -30642,7 +32874,7 @@ "type": "object" }, "InstanceTemplate": { - "description": "Represents an Instance Template resource.\n\nYou can use instance templates to create VM instances and managed instance groups. For more information, read Instance Templates. (== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", + "description": "Represents an Instance Template resource.\n\nYou can use instance templates to create VM instances and managed instance groups. For more information, read Instance Templates. (== resource_for {$api_version}.instanceTemplates ==)", "id": "InstanceTemplate", "properties": { "creationTimestamp": { @@ -30821,6 +33053,7 @@ "status": { "description": "[Output Only] The status of the instance.", "enum": [ + "DEPROVISIONING", "PROVISIONING", "REPAIRING", "RUNNING", @@ -30840,6 +33073,7 @@ "", "", "", + "", "" ], "type": "string" @@ -30847,6 +33081,32 @@ }, "type": "object" }, + "InstancesAddResourcePoliciesRequest": { + "id": "InstancesAddResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be added to this instance.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesRemoveResourcePoliciesRequest": { + "id": "InstancesRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this instance.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "InstancesScopedList": { "id": "InstancesScopedList", "properties": { @@ -31039,7 +33299,7 @@ "type": "object" }, "Interconnect": { - "description": "Represents an Interconnect resource.\n\nAn Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", + "description": "Represents an Interconnect resource.\n\nAn Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview. 
(== resource_for {$api_version}.interconnects ==)", "id": "Interconnect", "properties": { "adminEnabled": { @@ -31187,7 +33447,7 @@ "type": "object" }, "InterconnectAttachment": { - "description": "Represents an Interconnect Attachment (VLAN) resource.\n\nYou can use Interconnect attachments (VLANS) to connect your Virtual Private Cloud networks to your on-premises networks through an Interconnect. For more information, read Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", + "description": "Represents an Interconnect Attachment (VLAN) resource.\n\nYou can use Interconnect attachments (VLANS) to connect your Virtual Private Cloud networks to your on-premises networks through an Interconnect. For more information, read Creating VLAN Attachments. (== resource_for {$api_version}.interconnectAttachments ==)", "id": "InterconnectAttachment", "properties": { "adminEnabled": { @@ -31227,7 +33487,7 @@ "type": "string" }, "candidateSubnets": { - "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google?s edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", + "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google's edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", "items": { "type": "string" }, @@ -31600,7 +33860,7 @@ "id": "InterconnectAttachmentPartnerMetadata", "properties": { "interconnectName": { - "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner?s portal. For instance \"Chicago 1\". This value may be validated to match approved Partner values.", + "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance \"Chicago 1\". This value may be validated to match approved Partner values.", "type": "string" }, "partnerName": { @@ -31608,7 +33868,7 @@ "type": "string" }, "portalUrl": { - "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "description": "URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. 
This value may be validated to match approved Partner values.", "type": "string" } }, @@ -31740,7 +34000,7 @@ "type": "object" }, "InterconnectDiagnostics": { - "description": "Diagnostics information about interconnect, contains detailed and current technical information about Google?s side of the connection.", + "description": "Diagnostics information about interconnect, contains detailed and current technical information about Google's side of the connection.", "id": "InterconnectDiagnostics", "properties": { "arpCaches": { @@ -31783,11 +34043,11 @@ "id": "InterconnectDiagnosticsLinkLACPStatus", "properties": { "googleSystemId": { - "description": "System ID of the port on Google?s side of the LACP exchange.", + "description": "System ID of the port on Google's side of the LACP exchange.", "type": "string" }, "neighborSystemId": { - "description": "System ID of the port on the neighbor?s side of the LACP exchange.", + "description": "System ID of the port on the neighbor's side of the LACP exchange.", "type": "string" }, "state": { @@ -32315,7 +34575,7 @@ "type": "object" }, "License": { - "description": "A license resource.", + "description": "Represents a License resource.\n\nA License represents billing and aggregate usage data for public and marketplace images. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. (== resource_for {$api_version}.licenses ==)", "id": "License", "properties": { "chargesUseFee": { @@ -32370,6 +34630,7 @@ "type": "object" }, "LicenseCode": { + "description": "Represents a License Code resource.\n\nA License Code is a unique identifier used to represent a license resource. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. (== resource_for {$api_version}.licenseCodes ==)", "id": "LicenseCode", "properties": { "creationTimestamp": { @@ -32617,7 +34878,7 @@ "type": "object" }, "LogConfigCounterOptions": { - "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nSupported field names: - \"authority\", which is \"[token]\" if IAMContext.token is present, otherwise the value of IAMContext.authority_selector if present, and otherwise a representation of IAMContext.principal; or - \"iam_principal\", a representation of IAMContext.principal even if a token or authority selector is present; or - \"\" (empty string), resulting in a counter with no fields.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nTODO(b/141846426): Consider supporting \"authority\" and \"iam_principal\" fields in the same counter.", + "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. 
The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nSupported field names: - \"authority\", which is \"[token]\" if IAMContext.token is present, otherwise the value of IAMContext.authority_selector if present, and otherwise a representation of IAMContext.principal; or - \"iam_principal\", a representation of IAMContext.principal even if a token or authority selector is present; or - \"\" (empty string), resulting in a counter with no fields.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/debug_access_count {iam_principal=[value of IAMContext.principal]}", "id": "LogConfigCounterOptions", "properties": { "customFields": { @@ -32658,7 +34919,7 @@ "id": "LogConfigDataAccessOptions", "properties": { "logMode": { - "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.", + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is currently supported in the LocalIAM implementation, Stubby C++, and Stubby Java. For Apps Framework, see go/af-audit-logging#failclosed. TODO(b/77591626): Add support for Stubby Go. TODO(b/129671387): Add support for Scaffolding.", "enum": [ "LOG_FAIL_CLOSED", "LOG_MODE_UNSPECIFIED" @@ -32673,7 +34934,7 @@ "type": "object" }, "MachineType": { - "description": "Represents a Machine Type resource.\n\nYou can use specific machine types for your VM instances based on performance and pricing requirements. For more information, read Machine Types. (== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", + "description": "Represents a Machine Type resource.\n\nYou can use specific machine types for your VM instances based on performance and pricing requirements. For more information, read Machine Types. (== resource_for {$api_version}.machineTypes ==)", "id": "MachineType", "properties": { "creationTimestamp": { @@ -33114,9 +35375,17 @@ "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", "type": "string" }, + "instanceHealth": { + "description": "[Output Only] Health state of the instance per health-check.", + "items": { + "$ref": "ManagedInstanceInstanceHealth" + }, + "type": "array" + }, "instanceStatus": { "description": "[Output Only] The status of the instance. 
This field is empty when the instance does not exist.", "enum": [ + "DEPROVISIONING", "PROVISIONING", "REPAIRING", "RUNNING", @@ -33136,6 +35405,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33151,6 +35421,34 @@ }, "type": "object" }, + "ManagedInstanceInstanceHealth": { + "id": "ManagedInstanceInstanceHealth", + "properties": { + "detailedHealthState": { + "description": "[Output Only] The current detailed instance health state.", + "enum": [ + "DRAINING", + "HEALTHY", + "TIMEOUT", + "UNHEALTHY", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "healthCheck": { + "description": "[Output Only] The URL for the health check that verifies whether the instance is healthy.", + "type": "string" + } + }, + "type": "object" + }, "ManagedInstanceLastAttempt": { "id": "ManagedInstanceLastAttempt", "properties": { @@ -33246,7 +35544,7 @@ "type": "object" }, "MetadataFilter": { - "description": "Opaque filter criteria used by loadbalancers to restrict routing configuration to a limited set of loadbalancing proxies. Proxies and sidecars involved in loadbalancing would typically present metadata to the loadbalancers which need to match criteria specified here. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nAn example for using metadataFilters would be: if loadbalancing involves Envoys, they will only receive routing configuration when values in metadataFilters match values supplied in \u003ca href=\"https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/base.proto#envoy-api-msg-core-node\" Node metadata of their XDS requests to loadbalancers.", + "description": "Opaque filter criteria used by loadbalancers to restrict routing configuration to a limited set of loadbalancing proxies. Proxies and sidecars involved in loadbalancing would typically present metadata to the loadbalancers which need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata.\nAn example for using metadataFilters would be: if loadbalancing involves Envoys, they will only receive routing configuration when values in metadataFilters match values supplied in \u003ca href=\"https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/base.proto#envoy-api-msg-core-node\" Node metadata of their XDS requests to loadbalancers.", "id": "MetadataFilter", "properties": { "filterLabels": { @@ -33305,7 +35603,7 @@ "type": "object" }, "Network": { - "description": "Represents a VPC Network resource.\n\nNetworks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", + "description": "Represents a VPC Network resource.\n\nNetworks connect resources to each other and to the internet. 
For more information, read Virtual Private Cloud (VPC) Network. (== resource_for {$api_version}.networks ==)", "id": "Network", "properties": { "IPv4Range": { @@ -33346,7 +35644,7 @@ "compute.networks.insert" ] }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -33376,9 +35674,13 @@ "type": "object" }, "NetworkEndpoint": { - "description": "The network endpoint.", + "description": "The network endpoint. Next ID: 7", "id": "NetworkEndpoint", "properties": { + "fqdn": { + "description": "Optional fully qualified domain name of network endpoint. This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.", + "type": "string" + }, "instance": { "description": "The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.\n\nThe name must be 1-63 characters long, and comply with RFC1035.", "type": "string" @@ -33396,7 +35698,7 @@ "type": "object" }, "NetworkEndpointGroup": { - "description": "Represents a collection of network endpoints.\n\nFor more information read Setting up network endpoint groups in load balancing. (== resource_for v1.networkEndpointGroups ==) (== resource_for beta.networkEndpointGroups ==)", + "description": "Represents a collection of network endpoints.\n\nFor more information read Network endpoint groups overview. (== resource_for {$api_version}.networkEndpointGroups ==) Next ID: 21", "id": "NetworkEndpointGroup", "properties": { "creationTimestamp": { @@ -33431,11 +35733,15 @@ "type": "string" }, "networkEndpointType": { - "description": "Type of network endpoints in this network endpoint group. Currently the only supported value is GCE_VM_IP_PORT.", + "description": "Type of network endpoints in this network endpoint group.", "enum": [ - "GCE_VM_IP_PORT" + "GCE_VM_IP_PORT", + "INTERNET_FQDN_PORT", + "INTERNET_IP_PORT" ], "enumDescriptions": [ + "", + "", "" ], "type": "string" @@ -34217,9 +36523,13 @@ "type": "object" }, "NodeGroup": { - "description": "Represent a sole-tenant Node Group resource.\n\nA sole-tenant node is a physical server that is dedicated to hosting VM instances only for your specific project. Use sole-tenant nodes to keep your instances physically separated from instances in other projects, or to group your instances together on the same host hardware. For more information, read Sole-tenant nodes. 
(== resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==) NextID: 16", + "description": "Represent a sole-tenant Node Group resource.\n\nA sole-tenant node is a physical server that is dedicated to hosting VM instances only for your specific project. Use sole-tenant nodes to keep your instances physically separated from instances in other projects, or to group your instances together on the same host hardware. For more information, read Sole-tenant nodes. (== resource_for {$api_version}.nodeGroups ==)", "id": "NodeGroup", "properties": { + "autoscalingPolicy": { + "$ref": "NodeGroupAutoscalingPolicy", + "description": "Specifies how autoscaling should behave." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -34228,6 +36538,10 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "format": "byte", + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -34238,6 +36552,22 @@ "description": "[Output Only] The type of the resource. Always compute#nodeGroup for node group.", "type": "string" }, + "maintenancePolicy": { + "description": "Specifies how to handle instances when a node in the group undergoes maintenance.", + "enum": [ + "DEFAULT", + "MAINTENANCE_POLICY_UNSPECIFIED", + "MIGRATE_WITHIN_NODE_GROUP", + "RESTART_IN_PLACE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, "name": { "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "type": "string" @@ -34389,6 +36719,38 @@ }, "type": "object" }, + "NodeGroupAutoscalingPolicy": { + "id": "NodeGroupAutoscalingPolicy", + "properties": { + "maxNodes": { + "description": "The maximum number of nodes that the group should have.", + "format": "int32", + "type": "integer" + }, + "minNodes": { + "description": "The minimum number of nodes that the group should have.", + "format": "int32", + "type": "integer" + }, + "mode": { + "description": "The autoscaling mode.", + "enum": [ + "MODE_UNSPECIFIED", + "OFF", + "ON", + "ONLY_SCALE_OUT" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "NodeGroupList": { "description": "Contains a list of nodeGroups.", "id": "NodeGroupList", @@ -34523,6 +36885,10 @@ "$ref": "ServerBinding", "description": "Binding properties for the physical server." }, + "serverId": { + "description": "Server ID associated with this node.", + "type": "string" + }, "status": { "enum": [ "CREATING", @@ -34783,7 +37149,7 @@ "type": "object" }, "NodeTemplate": { - "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. 
(== resource_for beta.nodeTemplates ==) (== resource_for v1.nodeTemplates ==) (== NextID: 16 ==)", + "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. (== resource_for {$api_version}.nodeTemplates ==) (== NextID: 19 ==)", "id": "NodeTemplate", "properties": { "creationTimestamp": { @@ -35192,7 +37558,7 @@ "type": "object" }, "NodeType": { - "description": "Represent a sole-tenant Node Type resource.\n\nEach node within a node group must have a node type. A node type specifies the total amount of cores and memory for that node. Currently, the only available node type is n1-node-96-624 node type that has 96 vCPUs and 624 GB of memory, available in multiple zones. For more information read Node types. (== resource_for beta.nodeTypes ==) (== resource_for v1.nodeTypes ==)", + "description": "Represent a sole-tenant Node Type resource.\n\nEach node within a node group must have a node type. A node type specifies the total amount of cores and memory for that node. Currently, the only available node type is n1-node-96-624 node type that has 96 vCPUs and 624 GB of memory, available in multiple zones. For more information read Node types. (== resource_for {$api_version}.nodeTypes ==)", "id": "NodeType", "properties": { "cpuPlatform": { @@ -35571,7 +37937,7 @@ "type": "object" }, "Operation": { - "description": "Represents an Operation resource.\n\nYou can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses.\n\nOperations can be global, regional or zonal. \n- For global operations, use the globalOperations resource. \n- For regional operations, use the regionOperations resource. \n- For zonal operations, use the zonalOperations resource. \n\nFor more information, read Global, Regional, and Zonal Resources. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", + "description": "Represents an Operation resource.\n\nGoogle Compute Engine has three Operation resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) * [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) * [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)\n\nYou can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses.\n\nOperations can be global, regional or zonal. \n- For global operations, use the `globalOperations` resource. \n- For regional operations, use the `regionOperations` resource. \n- For zonal operations, use the `zonalOperations` resource. \n\nFor more information, read Global, Regional, and Zonal Resources. 
(== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==)", "id": "Operation", "properties": { "clientOperationId": { @@ -35618,11 +37984,11 @@ "type": "object" }, "httpErrorMessage": { - "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND.", + "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`.", "type": "string" }, "httpErrorStatusCode": { - "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", + "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found.", "format": "int32", "type": "integer" }, @@ -35637,7 +38003,7 @@ }, "kind": { "default": "compute#operation", - "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", + "description": "[Output Only] Type of the resource. Always `compute#operation` for Operation resources.", "type": "string" }, "name": { @@ -35645,7 +38011,7 @@ "type": "string" }, "operationType": { - "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on.", + "description": "[Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on.", "type": "string" }, "progress": { @@ -35666,7 +38032,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", + "description": "[Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`.", "enum": [ "DONE", "PENDING", @@ -35693,7 +38059,7 @@ "type": "string" }, "user": { - "description": "[Output Only] User who requested the operation, for example: user@example.com.", + "description": "[Output Only] User who requested the operation, for example: `user@example.com`.", "type": "string" }, "warnings": { @@ -35804,11 +38170,11 @@ }, "kind": { "default": "compute#operationAggregatedList", - "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", + "description": "[Output Only] Type of resource. Always `compute#operationAggregatedList` for aggregated lists of operations.", "type": "string" }, "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than `maxResults`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results.", "type": "string" }, "selfLink": { @@ -35916,7 +38282,501 @@ }, "kind": { "default": "compute#operationList", - "description": "[Output Only] Type of resource. 
Always compute#operations for Operations resource.", + "description": "[Output Only] Type of resource. Always `compute#operations` for Operations resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than `maxResults`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "OperationsScopedList": { + "id": "OperationsScopedList", + "properties": { + "operations": { + "description": "[Output Only] A list of operations contained in this scope.", + "items": { + "$ref": "Operation" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "OutlierDetection": { + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service.", + "id": "OutlierDetection", + "properties": { + "baseEjectionTime": { + "$ref": "Duration", + "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." + }, + "consecutiveErrors": { + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "consecutiveGatewayFailure": { + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveErrors": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. 
Defaults to 0.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveGatewayFailure": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "enforcingSuccessRate": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "interval": { + "$ref": "Duration", + "description": "Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second." + }, + "maxEjectionPercent": { + "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%.", + "format": "int32", + "type": "integer" + }, + "successRateMinimumHosts": { + "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "successRateRequestVolume": { + "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "successRateStdevFactor": { + "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "PacketMirroring": { + "description": "Represents a PacketMirroring resource.", + "id": "PacketMirroring", + "properties": { + "collectorIlb": { + "$ref": "PacketMirroringForwardingRuleInfo", + "description": "The Forwarding Rule resource of type loadBalancingScheme=INTERNAL that will be used as collector for mirrored traffic. The specified forwarding rule must have isMirroringCollector set to true." + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "enable": { + "description": "Indicates whether or not this packet mirroring takes effect. If set to FALSE, this packet mirroring policy will not be enforced on the network.\n\nThe default is TRUE.", + "enum": [ + "FALSE", + "TRUE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "filter": { + "$ref": "PacketMirroringFilter", + "description": "Filter for mirrored traffic. 
If unspecified, all traffic is mirrored." + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#packetMirroring", + "description": "[Output Only] Type of the resource. Always compute#packetMirroring for packet mirrorings.", + "type": "string" + }, + "mirroredResources": { + "$ref": "PacketMirroringMirroredResourceInfo", + "description": "PacketMirroring mirroredResourceInfos. MirroredResourceInfo specifies a set of mirrored VM instances, subnetworks and/or tags for which traffic from/to all VM instances will be mirrored." + }, + "name": { + "annotations": { + "required": [ + "compute.packetMirrorings.insert" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "$ref": "PacketMirroringNetworkInfo", + "annotations": { + "required": [ + "compute.packetMirrorings.insert" + ] + }, + "description": "Specifies the mirrored VPC network. Only packets in this network will be mirrored. All mirrored VMs should have a NIC in the given network. All mirrored subnetworks should belong to the given network." + }, + "priority": { + "description": "The priority of applying this configuration. Priority is used to break ties in cases where there is more than one matching rule. In the case of two rules that apply for a given Instance, the one with the lowest-numbered priority value wins.\n\nDefault value is 1000. Valid range is 0 through 65535.", + "format": "uint32", + "type": "integer" + }, + "region": { + "description": "[Output Only] URI of the region where the packetMirroring resides.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringAggregatedList": { + "description": "Contains a list of packetMirrorings.", + "id": "PacketMirroringAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "PacketMirroringsScopedList", + "description": "Name of the scope containing this set of packetMirrorings." + }, + "description": "A list of PacketMirroring resources.", + "type": "object" + }, + "kind": { + "default": "compute#packetMirroringAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "PacketMirroringFilter": { + "id": "PacketMirroringFilter", + "properties": { + "IPProtocols": { + "description": "Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.", + "items": { + "type": "string" + }, + "type": "array" + }, + "cidrRanges": { + "description": "IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. 
If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "PacketMirroringForwardingRuleInfo": { + "id": "PacketMirroringForwardingRuleInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the forwarding rule; defined by the server.", + "type": "string" + }, + "url": { + "description": "Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringList": { + "description": "Contains a list of PacketMirroring resources.", + "id": "PacketMirroringList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of PacketMirroring resources.", + "items": { + "$ref": "PacketMirroring" + }, + "type": "array" + }, + "kind": { + "default": "compute#packetMirroringList", + "description": "[Output Only] Type of resource. Always compute#packetMirroring for packetMirrorings.", "type": "string" }, "nextPageToken": { @@ -36011,18 +38871,87 @@ }, "type": "object" }, - "OperationsScopedList": { - "id": "OperationsScopedList", + "PacketMirroringMirroredResourceInfo": { + "id": "PacketMirroringMirroredResourceInfo", "properties": { - "operations": { - "description": "[Output Only] A list of operations contained in this scope.", + "instances": { + "description": "A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring.\n\nNote that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring.\n\nYou may specify a maximum of 50 Instances.", "items": { - "$ref": "Operation" + "$ref": "PacketMirroringMirroredResourceInfoInstanceInfo" + }, + "type": "array" + }, + "subnetworks": { + "description": "A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring.\n\nYou may specify a maximum of 5 subnetworks.", + "items": { + "$ref": "PacketMirroringMirroredResourceInfoSubnetInfo" + }, + "type": "array" + }, + "tags": { + "description": "A set of mirrored tags. 
Traffic from/to all VM instances that have one or more of these tags will be mirrored.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "PacketMirroringMirroredResourceInfoInstanceInfo": { + "id": "PacketMirroringMirroredResourceInfoInstanceInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the instance; defined by the server.", + "type": "string" + }, + "url": { + "description": "Resource URL to the virtual machine instance which is being mirrored.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringMirroredResourceInfoSubnetInfo": { + "id": "PacketMirroringMirroredResourceInfoSubnetInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the subnetwork; defined by the server.", + "type": "string" + }, + "url": { + "description": "Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringNetworkInfo": { + "id": "PacketMirroringNetworkInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the network; defined by the server.", + "type": "string" + }, + "url": { + "description": "URL of the network resource.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringsScopedList": { + "id": "PacketMirroringsScopedList", + "properties": { + "packetMirrorings": { + "description": "A list of packetMirrorings contained in this scope.", + "items": { + "$ref": "PacketMirroring" }, "type": "array" }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "description": "Informational warning which replaces the list of packetMirrorings when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -36105,66 +39034,6 @@ }, "type": "object" }, - "OutlierDetection": { - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service.", - "id": "OutlierDetection", - "properties": { - "baseEjectionTime": { - "$ref": "Duration", - "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." - }, - "consecutiveErrors": { - "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", - "format": "int32", - "type": "integer" - }, - "consecutiveGatewayFailure": { - "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", - "format": "int32", - "type": "integer" - }, - "enforcingConsecutiveErrors": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. 
Defaults to 0.", - "format": "int32", - "type": "integer" - }, - "enforcingConsecutiveGatewayFailure": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", - "format": "int32", - "type": "integer" - }, - "enforcingSuccessRate": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", - "format": "int32", - "type": "integer" - }, - "interval": { - "$ref": "Duration", - "description": "Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second." - }, - "maxEjectionPercent": { - "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%.", - "format": "int32", - "type": "integer" - }, - "successRateMinimumHosts": { - "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5.", - "format": "int32", - "type": "integer" - }, - "successRateRequestVolume": { - "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", - "format": "int32", - "type": "integer" - }, - "successRateStdevFactor": { - "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, "PathMatcher": { "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", "id": "PathMatcher", @@ -36179,7 +39048,7 @@ }, "defaultUrlRedirect": { "$ref": "HttpRedirectAction", - "description": "When when none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", @@ -36201,7 +39070,7 @@ "type": "array" }, "routeRules": { - "description": "The list of ordered HTTP route rules. 
Use this list instead of pathRules when advanced route matching and routing actions are desired. The order of specifying routeRules matters: the first rule that matches will cause its specified routing action to take effect.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.\nrouteRules are not supported in UrlMaps intended for External Load balancers.", + "description": "The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number.\nWithin a given pathMatcher, you can set only one of pathRules or routeRules.", "items": { "$ref": "HttpRouteRule" }, @@ -36236,8 +39105,23 @@ }, "type": "object" }, + "PerInstanceConfig": { + "id": "PerInstanceConfig", + "properties": { + "fingerprint": { + "description": "Fingerprint of this per-instance config. This field can be used in optimistic locking. It is ignored when inserting a per-instance config. An up-to-date fingerprint must be provided in order to update an existing per-instance config or the field needs to be unset.", + "format": "byte", + "type": "string" + }, + "name": { + "description": "The name of a per-instance config and its corresponding instance. Serves as a merge key during UpdatePerInstanceConfigs operations, that is, if a per-instance config with the same name exists then it will be updated, otherwise a new one will be created for the VM instance with the same name. An attempt to create a per-instance config for a VM instance that either doesn't exist or is not part of the group will result in an error.", + "type": "string" + } + }, + "type": "object" + }, "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions (defined by IAM or configured by users). 
A `binding` can optionally specify a `condition`, which is a logic expression that further constrains the role binding based on attributes about the request and/or target resource.\n\n**JSON Example**\n\n{ \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [\"user:eve@example.com\"], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ] }\n\n**YAML Example**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam/docs).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.\n\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role.\n\nOptionally, a `binding` can specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both.\n\n**JSON example:**\n\n{ \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [\"user:eve@example.com\"], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 }\n\n**YAML example:**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3\n\nFor a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -36248,14 +39132,14 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally may specify a `condition` that determines when binding is in effect. 
`bindings` with no members will result in an error.", + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten. Due to blind-set semantics of an etag-less policy, 'setIamPolicy' will not fail even if either of incoming or stored policy does not meet the version requirements.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", "format": "byte", "type": "string" }, @@ -36271,15 +39155,28 @@ "type": "array" }, "version": { - "description": "Specifies the format of the policy.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.\n\nOperations affecting conditional bindings must specify version 3. This can be either setting a conditional policy, modifying a conditional binding, or removing a conditional binding from the stored conditional policy. Operations on non-conditional policies may specify any valid value or leave the field unset.\n\nIf no etag is provided in the call to `setIamPolicy`, any version compliance checks on the incoming and/or stored policy is skipped.", + "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected.\n\nAny operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. 
If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.", "format": "int32", "type": "integer" } }, "type": "object" }, + "PreconfiguredWafSet": { + "id": "PreconfiguredWafSet", + "properties": { + "expressionSets": { + "description": "List of entities that are currently supported for WAF rules.", + "items": { + "$ref": "WafExpressionSet" + }, + "type": "array" + } + }, + "type": "object" + }, "Project": { - "description": "Represents a Project resource.\n\nA project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy. (== resource_for v1.projects ==) (== resource_for beta.projects ==)", + "description": "Represents a Project resource.\n\nA project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy. (== resource_for {$api_version}.projects ==)", "id": "Project", "properties": { "commonInstanceMetadata": { @@ -36443,6 +39340,7 @@ "metric": { "description": "[Output Only] Name of the quota metric.", "enum": [ + "AFFINITY_GROUPS", "AUTOSCALERS", "BACKEND_BUCKETS", "BACKEND_SERVICES", @@ -36450,7 +39348,9 @@ "COMMITMENTS", "COMMITTED_C2_CPUS", "COMMITTED_CPUS", + "COMMITTED_LICENSES", "COMMITTED_LOCAL_SSD_TOTAL_GB", + "COMMITTED_N2D_CPUS", "COMMITTED_N2_CPUS", "COMMITTED_NVIDIA_K80_GPUS", "COMMITTED_NVIDIA_P100_GPUS", @@ -36476,14 +39376,18 @@ "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", "INTERCONNECT_TOTAL_GBPS", "INTERNAL_ADDRESSES", + "IN_PLACE_SNAPSHOTS", "IN_USE_ADDRESSES", "IN_USE_BACKUP_SCHEDULES", "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", "MACHINE_IMAGES", + "N2D_CPUS", "N2_CPUS", "NETWORKS", "NETWORK_ENDPOINT_GROUPS", + "NODE_GROUPS", + "NODE_TEMPLATES", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", @@ -36492,6 +39396,7 @@ "NVIDIA_T4_GPUS", "NVIDIA_T4_VWS_GPUS", "NVIDIA_V100_GPUS", + "PACKET_MIRRORINGS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", "PREEMPTIBLE_NVIDIA_K80_GPUS", @@ -36502,6 +39407,8 @@ "PREEMPTIBLE_NVIDIA_T4_GPUS", "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", "PREEMPTIBLE_NVIDIA_V100_GPUS", + "PUBLIC_ADVERTISED_PREFIXES", + "PUBLIC_DELEGATED_PREFIXES", "REGIONAL_AUTOSCALERS", "REGIONAL_INSTANCE_GROUP_MANAGERS", "RESERVATIONS", @@ -36509,11 +39416,13 @@ "ROUTERS", "ROUTES", "SECURITY_POLICIES", + "SECURITY_POLICY_CEVAL_RULES", "SECURITY_POLICY_RULES", "SNAPSHOTS", "SSD_TOTAL_GB", "SSL_CERTIFICATES", "STATIC_ADDRESSES", + "STATIC_BYOIP_ADDRESSES", "SUBNETWORKS", "TARGET_HTTPS_PROXIES", "TARGET_HTTP_PROXIES", @@ -36608,6 +39517,18 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" @@ -36649,7 +39570,7 @@ "type": "object" }, "Region": { - "description": "Represents a Region resource.\n\nA region is a geographical area where a resource is located. For more information, read Regions and Zones. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", + "description": "Represents a Region resource.\n\nA region is a geographical area where a resource is located. For more information, read Regions and Zones. 
(== resource_for {$api_version}.regions ==)", "id": "Region", "properties": { "creationTimestamp": { @@ -37208,6 +40129,66 @@ }, "type": "object" }, + "RegionInstanceGroupManagersApplyUpdatesRequest": { + "description": "InstanceGroupManagers.applyUpdatesToInstances", + "id": "RegionInstanceGroupManagersApplyUpdatesRequest", + "properties": { + "instances": { + "description": "The list of URLs of one or more instances for which you want to apply updates. Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + }, + "type": "array" + }, + "minimalAction": { + "description": "The minimal action that you want to perform on each instance during the update: \n- REPLACE: At minimum, delete the instance and create it again. \n- RESTART: Stop the instance and start it again. \n- REFRESH: Do not stop the instance. \n- NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that you want to perform on each instance during the update: \n- REPLACE: Delete the instance and create it again. \n- RESTART: Stop the instance and start it again. \n- REFRESH: Do not stop the instance. \n- NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. If your update requires a more disruptive action than you set with this flag, the update request will fail.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersCreateInstancesRequest": { + "description": "RegionInstanceGroupManagers.createInstances", + "id": "RegionInstanceGroupManagersCreateInstancesRequest", + "properties": { + "instances": { + "description": "[Required] List of specifications of per-instance configs.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, "RegionInstanceGroupManagersDeleteInstancesRequest": { "id": "RegionInstanceGroupManagersDeleteInstancesRequest", "properties": { @@ -37221,6 +40202,23 @@ }, "type": "object" }, + "RegionInstanceGroupManagersListErrorsResponse": { + "id": "RegionInstanceGroupManagersListErrorsResponse", + "properties": { + "items": { + "description": "[Output Only] The list of errors of the managed instance group.", + "items": { + "$ref": "InstanceManagedByIgmError" + }, + "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + } + }, + "type": "object" + }, "RegionInstanceGroupManagersListInstancesResponse": { "id": "RegionInstanceGroupManagersListInstancesResponse", "properties": { @@ -37230,6 +40228,10 @@ "$ref": "ManagedInstance" }, "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" } }, "type": "object" @@ -37614,7 +40616,7 @@ "type": "object" }, "Reservation": { - "description": "Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. (== resource_for beta.reservations ==) (== resource_for v1.reservations ==)", + "description": "Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. (== resource_for {$api_version}.reservations ==)", "id": "Reservation", "properties": { "commitment": { @@ -38188,6 +41190,7 @@ "type": "object" }, "ResourcePolicy": { + "description": "Represents a Resource Policy resource. You can use resource policies to schedule actions for some Compute Engine resources. For example, you can use them to schedule persistent disk snapshots.\n\n(== resource_for {$api_version}.resourcePolicies ==)", "id": "ResourcePolicy", "properties": { "creationTimestamp": { @@ -38197,6 +41200,10 @@ "description": { "type": "string" }, + "groupPlacementPolicy": { + "$ref": "ResourcePolicyGroupPlacementPolicy", + "description": "Resource policy for instacnes for placement configuration." + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -38368,7 +41375,7 @@ "id": "ResourcePolicyDailyCycle", "properties": { "daysInCycle": { - "description": "Defines a schedule that runs every nth day of the month.", + "description": "Defines a schedule with units measured in months. The value determines how many months pass between the start of each cycle.", "format": "int32", "type": "integer" }, @@ -38383,6 +41390,35 @@ }, "type": "object" }, + "ResourcePolicyGroupPlacementPolicy": { + "description": "A GroupPlacementPolicy specifies resource placement configuration. It specifies the failure bucket separation as well as network locality", + "id": "ResourcePolicyGroupPlacementPolicy", + "properties": { + "availabilityDomainCount": { + "description": "The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network", + "format": "int32", + "type": "integer" + }, + "collocation": { + "description": "Specifies network collocation", + "enum": [ + "COLLOCATED", + "UNSPECIFIED_COLLOCATION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "vmCount": { + "description": "Number of vms in this placement group", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "ResourcePolicyHourlyCycle": { "description": "Time window specified for hourly operations.", "id": "ResourcePolicyHourlyCycle", @@ -38392,7 +41428,7 @@ "type": "string" }, "hoursInCycle": { - "description": "Allows to define schedule that runs every nth hour.", + "description": "Defines a schedule with units measured in hours. 
The value determines how many hours pass between the start of each cycle.", "format": "int32", "type": "integer" }, @@ -38583,7 +41619,7 @@ "id": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", "properties": { "guestFlush": { - "description": "Indication to perform a ?guest aware? snapshot.", + "description": "Indication to perform a 'guest aware' snapshot.", "type": "boolean" }, "labels": { @@ -38621,7 +41657,7 @@ "id": "ResourcePolicyWeeklyCycleDayOfWeek", "properties": { "day": { - "description": "Allows to define schedule that runs specified day of the week.", + "description": "Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY.", "enum": [ "FRIDAY", "INVALID", @@ -38656,7 +41692,7 @@ "type": "object" }, "Route": { - "description": "Represents a Route resource.\n\nA route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", + "description": "Represents a Route resource.\n\nA route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview. (== resource_for {$api_version}.routes ==)", "id": "Route", "properties": { "creationTimestamp": { @@ -39301,11 +42337,11 @@ "type": "string" }, "linkedInterconnectAttachment": { - "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be either be a VPN tunnel or an Interconnect attachment.", + "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance.", "type": "string" }, "linkedVpnTunnel": { - "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be either a VPN tunnel or an Interconnect attachment.", + "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance.", "type": "string" }, "managementType": { @@ -39942,7 +42978,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, SSL health check follows behavior specified in\nport\nand\nportName\nfields.", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, SSL health check follows behavior specified in port and portName fields.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -39979,7 +43015,7 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 9", + "description": "Sets the scheduling options for an Instance. NextID: 10", "id": "Scheduling", "properties": { "automaticRestart": { @@ -39987,7 +43023,7 @@ "type": "boolean" }, "nodeAffinities": { - "description": "A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information.", + "description": "A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity.", "items": { "$ref": "SchedulingNodeAffinity" }, @@ -40044,8 +43080,26 @@ }, "type": "object" }, + "SecurityPoliciesListPreconfiguredExpressionSetsResponse": { + "id": "SecurityPoliciesListPreconfiguredExpressionSetsResponse", + "properties": { + "preconfiguredExpressionSets": { + "$ref": "SecurityPoliciesWafConfig" + } + }, + "type": "object" + }, + "SecurityPoliciesWafConfig": { + "id": "SecurityPoliciesWafConfig", + "properties": { + "wafRules": { + "$ref": "PreconfiguredWafSet" + } + }, + "type": "object" + }, "SecurityPolicy": { - "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services that use load balancers can reference a Security Policy. For more information, read Cloud Armor Security Policy Concepts. (== resource_for v1.securityPolicies ==) (== resource_for beta.securityPolicies ==)", + "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services that use load balancers can reference a Security Policy. For more information, read Cloud Armor Security Policy Concepts. (== resource_for {$api_version}.securityPolicies ==)", "id": "SecurityPolicy", "properties": { "creationTimestamp": { @@ -40225,7 +43279,7 @@ }, "match": { "$ref": "SecurityPolicyRuleMatcher", - "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding ?action? is enforced." + "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." }, "preview": { "description": "If set to true, the specified action is not enforced.", @@ -40247,6 +43301,10 @@ "$ref": "SecurityPolicyRuleMatcherConfig", "description": "The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified." }, + "expr": { + "$ref": "Expr", + "description": "User defined CEVAL expression. 
A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." + }, "versionedExpr": { "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", "enum": [ @@ -40422,7 +43480,7 @@ "type": "object" }, "Snapshot": { - "description": "Represents a Persistent Disk Snapshot resource.\n\nYou can use snapshots to back up data on a regular interval. For more information, read Creating persistent disk snapshots. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", + "description": "Represents a Persistent Disk Snapshot resource.\n\nYou can use snapshots to back up data on a regular interval. For more information, read Creating persistent disk snapshots. (== resource_for {$api_version}.snapshots ==)", "id": "Snapshot", "properties": { "autoCreated": { @@ -40442,6 +43500,11 @@ "format": "int64", "type": "string" }, + "downloadBytes": { + "description": "[Output Only] Number of bytes downloaded to restore a snapshot to a disk.", + "format": "int64", + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -40676,7 +43739,7 @@ "type": "object" }, "SslCertificate": { - "description": "Represents an SSL Certificate resource.\n\nThis SSL certificate resource also contains a private key. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and Using SSL Certificates. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", + "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates and SSL certificates quotas and limits. (== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", "id": "SslCertificate", "properties": { "certificate": { @@ -41163,7 +44226,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services used by HTTP or HTTPS load balancers can reference a Security Policy. For more information, read read Cloud Armor Security Policy Concepts. (== resource_for beta.sslPolicies ==) (== resource_for v1.sslPolicies ==)", + "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services used by HTTP or HTTPS load balancers can reference a Security Policy. For more information, read read Cloud Armor Security Policy Concepts. 
(== resource_for {$api_version}.sslPolicies ==)", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -41340,7 +44403,7 @@ "type": "object" }, "Subnetwork": { - "description": "Represents a Subnetwork resource.\n\nA subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", + "description": "Represents a Subnetwork resource.\n\nA subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network. (== resource_for {$api_version}.subnetworks ==)", "id": "Subnetwork", "properties": { "creationTimestamp": { @@ -41414,7 +44477,7 @@ "type": "string" }, "role": { - "description": "The role of subnetwork. Currenly, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", "enum": [ "ACTIVE", "BACKUP" @@ -41703,6 +44766,10 @@ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is to disable flow logging.", "type": "boolean" }, + "filterExpr": { + "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged.", + "type": "string" + }, "flowSampling": { "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5, which means half of all collected logs are reported.", "format": "float", @@ -41711,14 +44778,23 @@ "metadata": { "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. 
Default is INCLUDE_ALL_METADATA.", "enum": [ + "CUSTOM_METADATA", "EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA" ], "enumDescriptions": [ + "", "", "" ], "type": "string" + }, + "metadataFields": { + "description": "Can only be specified if VPC flow logs for this subnetwork is enabled and \"metadata\" was set to CUSTOM_METADATA.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -41864,7 +44940,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, TCP health check follows behavior specified in\nport\nand\nportName\nfields.", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in port is used for health checking.\nUSE_NAMED_PORT: The portName is used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, TCP health check follows behavior specified in port and portName fields.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -42014,7 +45090,7 @@ "type": "object" }, "TargetHttpProxy": { - "description": "Represents a Target HTTP Proxy resource.\n\nA target HTTP proxy is a component of certain types of load balancers. Global forwarding rules reference a target HTTP proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", + "description": "Represents a Target HTTP Proxy resource.\n\nGoogle Compute Engine has two Target HTTP Proxy resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/targetHttpProxies) * [Regional](/compute/docs/reference/rest/{$api_version}/regionTargetHttpProxies)\n\nA target HTTP proxy is a component of GCP HTTP load balancers.\n\n* targetHttpProxies are used by external HTTP load balancers and Traffic Director. * regionTargetHttpProxies are used by internal HTTP load balancers.\n\nForwarding rules reference a target HTTP proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies and Forwarding rule concepts. (== resource_for {$api_version}.targetHttpProxies ==) (== resource_for {$api_version}.regionTargetHttpProxies ==)", "id": "TargetHttpProxy", "properties": { "creationTimestamp": { @@ -42326,7 +45402,7 @@ "type": "object" }, "TargetHttpsProxy": { - "description": "Represents a Target HTTPS Proxy resource.\n\nA target HTTPS proxy is a component of certain types of load balancers. Global forwarding rules reference a target HTTPS proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies. 
(== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", + "description": "Represents a Target HTTPS Proxy resource.\n\nGoogle Compute Engine has two Target HTTPS Proxy resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/targetHttpsProxies) * [Regional](/compute/docs/reference/rest/{$api_version}/regionTargetHttpsProxies)\n\nA target HTTPS proxy is a component of GCP HTTPS load balancers.\n\n* targetHttpsProxies are used by external HTTPS load balancers. * regionTargetHttpsProxies are used by internal HTTPS load balancers.\n\nForwarding rules reference a target HTTPS proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies and Forwarding rule concepts. (== resource_for {$api_version}.targetHttpsProxies ==) (== resource_for {$api_version}.regionTargetHttpsProxies ==)", "id": "TargetHttpsProxy", "properties": { "creationTimestamp": { @@ -42617,7 +45693,7 @@ "type": "object" }, "TargetInstance": { - "description": "Represents a Target Instance resource.\n\nYou can use a target instance to handle traffic for one or more forwarding rules, which is ideal for forwarding protocol traffic that is managed by a single source. For example, ESP, AH, TCP, or UDP. For more information, read Target instances. (== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", + "description": "Represents a Target Instance resource.\n\nYou can use a target instance to handle traffic for one or more forwarding rules, which is ideal for forwarding protocol traffic that is managed by a single source. For example, ESP, AH, TCP, or UDP. For more information, read Target instances. (== resource_for {$api_version}.targetInstances ==)", "id": "TargetInstance", "properties": { "creationTimestamp": { @@ -42987,7 +46063,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource.\n\nTarget pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", + "description": "Represents a Target Pool resource.\n\nTarget pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools. (== resource_for {$api_version}.targetPools ==)", "id": "TargetPool", "properties": { "backupPool": { @@ -43507,7 +46583,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource.\n\nA target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies. (== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", + "description": "Represents a Target SSL Proxy resource.\n\nA target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies. 
(== resource_for {$api_version}.targetSslProxies ==)", "id": "TargetSslProxy", "properties": { "creationTimestamp": { @@ -43708,7 +46784,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource.\n\nA target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing Concepts. (== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", + "description": "Represents a Target TCP Proxy resource.\n\nA target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview. (== resource_for {$api_version}.targetTcpProxies ==)", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { @@ -43870,7 +46946,7 @@ "type": "object" }, "TargetVpnGateway": { - "description": "Represents a Target VPN Gateway resource.\n\nThe target VPN gateway resource represents a Classic Cloud VPN gateway. For more information, read the the Cloud VPN Overview. (== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", + "description": "Represents a Target VPN Gateway resource.\n\nThe target VPN gateway resource represents a Classic Cloud VPN gateway. For more information, read the the Cloud VPN Overview. (== resource_for {$api_version}.targetVpnGateways ==)", "id": "TargetVpnGateway", "properties": { "creationTimestamp": { @@ -44314,7 +47390,7 @@ "type": "object" }, "UrlMap": { - "description": "Represents a URL Map resource.\n\nA URL map resource is a component of certain types of load balancers. This resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use this resource, the backend service must have a loadBalancingScheme of either EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED For more information, read URL Map Concepts.", + "description": "Represents a URL Map resource.\n\nGoogle Compute Engine has two URL Map resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/urlMaps) * [Regional](/compute/docs/reference/rest/{$api_version}/regionUrlMaps)\n\nA URL map resource is a component of certain types of GCP load balancers and Traffic Director.\n\n* urlMaps are used by external HTTP(S) load balancers and Traffic Director. * regionUrlMaps are used by internal HTTP(S) load balancers.\n\nThis resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", "id": "UrlMap", "properties": { "creationTimestamp": { @@ -45140,7 +48216,7 @@ "type": "object" }, "VpnGateway": { - "description": "Represents a VPN gateway resource.", + "description": "Represents a VPN gateway resource. Next ID: 13", "id": "VpnGateway", "properties": { "creationTimestamp": { @@ -45643,7 +48719,7 @@ "type": "object" }, "VpnTunnel": { - "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. 
(== resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==)", + "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. (== resource_for {$api_version}.vpnTunnels ==)", "id": "VpnTunnel", "properties": { "creationTimestamp": { @@ -46102,6 +49178,40 @@ }, "type": "object" }, + "WafExpressionSet": { + "id": "WafExpressionSet", + "properties": { + "aliases": { + "description": "A list of alternate IDs. The format should be: - E.g. XSS-stable Generic suffix like \"stable\" is particularly useful if a policy likes to avail newer set of expressions without having to change the policy. A given alias name can't be used for more than one entity set.", + "items": { + "type": "string" + }, + "type": "array" + }, + "expressions": { + "description": "List of available expressions.", + "items": { + "$ref": "WafExpressionSetExpression" + }, + "type": "array" + }, + "id": { + "description": "Google specified expression set ID. The format should be: - E.g. XSS-20170329", + "type": "string" + } + }, + "type": "object" + }, + "WafExpressionSetExpression": { + "id": "WafExpressionSetExpression", + "properties": { + "id": { + "description": "Expression ID should uniquely identify the origin of the expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core rule set version 2.9.1 rule id 973337. The ID could be used to determine the individual attack definition that has been detected. It could also be used to exclude it from the policy in case of false positive.", + "type": "string" + } + }, + "type": "object" + }, "WeightedBackendService": { "description": "In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple BackendServices. The volume of traffic for each BackendService is proportional to the weight specified in each WeightedBackendService", "id": "WeightedBackendService", @@ -46257,7 +49367,7 @@ "type": "object" }, "Zone": { - "description": "Represents a Zone resource.\n\nA zone is a deployment area. These deployment areas are subsets of a region. For example the zone us-east1-a is located in the us-east1 region. For more information, read Regions and Zones. (== resource_for beta.zones ==) (== resource_for v1.zones ==)", + "description": "Represents a Zone resource.\n\nA zone is a deployment area. These deployment areas are subsets of a region. For example the zone us-east1-a is located in the us-east1 region. For more information, read Regions and Zones. (== resource_for {$api_version}.zones ==)", "id": "Zone", "properties": { "availableCpuPlatforms": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index b6be55c73..264ac77d4 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -56,6 +56,7 @@ import ( googleapi "google.golang.org/api/googleapi" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" htransport "google.golang.org/api/transport/http" ) @@ -72,6 +73,7 @@ var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint const apiId = "compute:v1" const apiName = "compute" @@ -111,6 +113,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -147,6 +150,7 @@ func New(client *http.Client) (*Service, error) { s.ForwardingRules = NewForwardingRulesService(s) s.GlobalAddresses = NewGlobalAddressesService(s) s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) + s.GlobalNetworkEndpointGroups = NewGlobalNetworkEndpointGroupsService(s) s.GlobalOperations = NewGlobalOperationsService(s) s.HealthChecks = NewHealthChecksService(s) s.HttpHealthChecks = NewHttpHealthChecksService(s) @@ -167,6 +171,7 @@ func New(client *http.Client) (*Service, error) { s.NodeGroups = NewNodeGroupsService(s) s.NodeTemplates = NewNodeTemplatesService(s) s.NodeTypes = NewNodeTypesService(s) + s.PacketMirrorings = NewPacketMirroringsService(s) s.Projects = NewProjectsService(s) s.RegionAutoscalers = NewRegionAutoscalersService(s) s.RegionBackendServices = NewRegionBackendServicesService(s) @@ -235,6 +240,8 @@ type Service struct { GlobalForwardingRules *GlobalForwardingRulesService + GlobalNetworkEndpointGroups *GlobalNetworkEndpointGroupsService + GlobalOperations *GlobalOperationsService HealthChecks *HealthChecksService @@ -275,6 +282,8 @@ type Service struct { NodeTypes *NodeTypesService + PacketMirrorings *PacketMirroringsService + Projects *ProjectsService RegionAutoscalers *RegionAutoscalersService @@ -463,6 +472,15 @@ type GlobalForwardingRulesService struct { s *Service } +func NewGlobalNetworkEndpointGroupsService(s *Service) *GlobalNetworkEndpointGroupsService { + rs := &GlobalNetworkEndpointGroupsService{s: s} + return rs +} + +type GlobalNetworkEndpointGroupsService struct { + s *Service +} + func NewGlobalOperationsService(s *Service) *GlobalOperationsService { rs := &GlobalOperationsService{s: s} return rs @@ -643,6 +661,15 @@ type NodeTypesService struct { s *Service } +func NewPacketMirroringsService(s *Service) *PacketMirroringsService { + rs := &PacketMirroringsService{s: s} + return rs +} + +type PacketMirroringsService struct { + s *Service +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} return rs @@ -1012,7 +1039,7 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { // (accelerators) that you can add to VM instances to improve or // accelerate performance when working with intensive workloads. For // more information, read GPUs on Compute Engine. (== resource_for -// beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==) +// {$api_version}.acceleratorTypes ==) type AcceleratorType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -1600,25 +1627,31 @@ func (s *AccessConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Address: Represents an IP Address resource. +// Address: Use global external addresses for GFE-based external HTTP(S) +// load balancers in Premium Tier. +// +// Use global internal addresses for reserved peering network +// range. +// +// Use regional external addresses for the following resources: +// +// - External IP addresses for VM instances - Regional external +// forwarding rules - Cloud NAT external IP addresses - GFE based LBs in +// Standard Tier - Network LBs in Premium or Standard Tier - Cloud VPN +// gateways (both Classic and HA) +// +// Use regional internal IP addresses for subnet IP ranges (primary and +// secondary). This includes: +// +// - Internal IP addresses for VM instances - Alias IP ranges of VM +// instances (/32 only) - Regional internal forwarding rules - Internal +// TCP/UDP load balancer addresses - Internal HTTP(S) load balancer +// addresses - Cloud DNS inbound forwarding IP addresses // -// An address resource represents a regional internal IP address. -// Regional internal IP addresses are RFC 1918 addresses that come from -// either a primary or secondary IP range of a subnet in a VPC network. -// Regional external IP addresses can be assigned to GCP VM instances, -// Cloud VPN gateways, regional external forwarding rules for network -// load balancers (in either Standard or Premium Tier), and regional -// external forwarding rules for HTTP(S), SSL Proxy, and TCP Proxy load -// balancers in Standard Tier. For more information, read IP -// addresses. +// For more information, read reserved IP address. // -// A globalAddresses resource represent a global external IP address. -// Global external IP addresses are IPv4 or IPv6 addresses. They can -// only be assigned to global forwarding rules for HTTP(S), SSL Proxy, -// or TCP Proxy load balancers in Premium Tier. For more information, -// read Global resources. (== resource_for beta.addresses ==) (== -// resource_for v1.addresses ==) (== resource_for beta.globalAddresses -// ==) (== resource_for v1.globalAddresses ==) +// (== resource_for {$api_version}.addresses ==) (== resource_for +// {$api_version}.globalAddresses ==) type Address struct { // Address: The static IP address represented by this resource. Address string `json:"address,omitempty"` @@ -2408,6 +2441,9 @@ type AttachedDisk struct { // group. DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // DiskSizeGb: The size of the disk in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // GuestOsFeatures: A list of features to enable on the guest operating // system. Applicable only for bootable images. Read Enabling guest // operating system features to see a list of available options. @@ -2432,8 +2468,7 @@ type AttachedDisk struct { // disks must always use SCSI and the request will fail if you attempt // to attach a persistent disk in any other format than SCSI. Local SSDs // can use either NVME or SCSI. For performance characteristics of SCSI - // over NVMe, see Local SSD performance. TODO(b/131765817): Update - // documentation when NVME is supported. + // over NVMe, see Local SSD performance. 
// // Possible values: // "NVME" @@ -2456,6 +2491,10 @@ type AttachedDisk struct { // "READ_WRITE" Mode string `json:"mode,omitempty"` + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state + // stored on disk + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` + // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. When creating a new instance, one of // initializeParams.sourceImage or initializeParams.sourceSnapshot or @@ -2513,14 +2552,16 @@ type AttachedDiskInitializeParams struct { Description string `json:"description,omitempty"` // DiskName: Specifies the disk name. If not specified, the default is - // to use the name of the instance. If the disk with the instance name - // exists already in the given zone/region, a new name will be - // automatically generated. + // to use the name of the instance. If a disk with the same name already + // exists in the given region, the existing disk is attached to the new + // instance and the new disk is not created. DiskName string `json:"diskName,omitempty"` - // DiskSizeGb: Specifies the size of the disk in base-2 GB. If not - // specified, the disk will be the same size as the image (usually - // 10GB). If specified, the size must be equal to or larger than 10GB. + // DiskSizeGb: Specifies the size of the disk in base-2 GB. The size + // must be at least 10 GB. If you specify a sourceImage, which is + // required for boot disks, the default size is the size of the + // sourceImage. If you do not specify a sourceImage, the default disk + // size is 500 GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` // DiskType: Specifies the disk type to use to create the instance. If @@ -2546,6 +2587,15 @@ type AttachedDiskInitializeParams struct { // persistent disks. Labels map[string]string `json:"labels,omitempty"` + // OnUpdateAction: Specifies which action to take on instance update + // with this disk. Default is to use the existing disk. + // + // Possible values: + // "RECREATE_DISK" + // "RECREATE_DISK_IF_SOURCE_CHANGED" + // "USE_EXISTING_DISK" + OnUpdateAction string `json:"onUpdateAction,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for // automatic snapshot creations. Specified using the full or partial // URL. For instance template, specify only the resource policy name. @@ -2784,7 +2834,11 @@ func (s *AuthorizationLoggingOptions) MarshalJSON() ([]byte, error) { // Autoscaler: Represents an Autoscaler resource. // +// Google Compute Engine has two Autoscaler resources: // +// * [Global](/compute/docs/reference/rest/{$api_version}/autoscalers) * +// [Regional](/compute/docs/reference/rest/{$api_version}/regionAutoscale +// rs) // // Use autoscalers to automatically add or delete instances from a // managed instance group according to your defined autoscaling policy. @@ -2794,9 +2848,8 @@ func (s *AuthorizationLoggingOptions) MarshalJSON() ([]byte, error) { // resource. // // For regional managed instance groups, use the regionAutoscalers -// resource. (== resource_for beta.autoscalers ==) (== resource_for -// v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== -// resource_for v1.regionAutoscalers ==) +// resource. (== resource_for {$api_version}.autoscalers ==) (== +// resource_for {$api_version}.regionAutoscalers ==) type Autoscaler struct { // AutoscalingPolicy: The configuration parameters for the autoscaling // algorithm. 
You can define one or more of the policies for an @@ -2832,6 +2885,13 @@ type Autoscaler struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // RecommendedSize: [Output Only] Target recommended MIG size (number of + // instances) computed by autoscaler. Autoscaler calculates recommended + // MIG size even when autoscaling policy mode is different from ON. This + // field is empty when autoscaler is not connected to the existing + // managed instance group or autoscaler did not generate its prediction. + RecommendedSize int64 `json:"recommendedSize,omitempty"` + // Region: [Output Only] URL of the region where the instance group // resides (for autoscalers living in regional scope). Region string `json:"region,omitempty"` @@ -3267,6 +3327,7 @@ type AutoscalerStatusDetails struct { // "MISSING_CUSTOM_METRIC_DATA_POINTS" // "MISSING_LOAD_BALANCING_DATA_POINTS" // "MODE_OFF" + // "MODE_ONLY_UP" // "MORE_THAN_ONE_BACKEND_SERVICE" // "NOT_ENOUGH_QUOTA_AVAILABLE" // "REGION_RESOURCE_STOCKOUT" @@ -3473,6 +3534,14 @@ type AutoscalingPolicy struct { // instances allowed. MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + // Mode: Defines operating mode for this policy. + // + // Possible values: + // "OFF" + // "ON" + // "ONLY_UP" + Mode string `json:"mode,omitempty"` + // ForceSendFields is a list of field names (e.g. "CoolDownPeriodSec") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3682,8 +3751,9 @@ type Backend struct { // // If the loadBalancingScheme for the backend service is EXTERNAL (SSL // Proxy and TCP Proxy load balancers), you must also specify exactly - // one of the following parameters: maxConnections, - // maxConnectionsPerInstance, or maxConnectionsPerEndpoint. + // one of the following parameters: maxConnections (except for regional + // managed instance groups), maxConnectionsPerInstance, or + // maxConnectionsPerEndpoint. // // If the loadBalancingScheme for the backend service is INTERNAL // (internal TCP/UDP load balancers), you cannot specify any additional @@ -3693,11 +3763,11 @@ type Backend struct { // rate of HTTP requests per second (RPS). // You can use the RATE balancing mode if the protocol for the backend // service is HTTP or HTTPS. You must specify exactly one of the - // following parameters: maxRate, maxRatePerInstance, or - // maxRatePerEndpoint. + // following parameters: maxRate (except for regional managed instance + // groups), maxRatePerInstance, or maxRatePerEndpoint. // // - If the load balancing mode is UTILIZATION, the load is spread based - // on the CPU utilization of instances in an instance group. + // on the backend utilization of instances in an instance group. // You can use the UTILIZATION balancing mode if the loadBalancingScheme // of the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or // INTERNAL_MANAGED and the backends are instance groups. There are no @@ -3723,6 +3793,11 @@ type Backend struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Failover: This field designates whether this is a failover backend. + // More than one failover backend can be configured for a given + // BackendService. + Failover bool `json:"failover,omitempty"` + // Group: The fully-qualified URL of an instance group or network // endpoint group (NEG) resource. 
The type of backend that a backend // service supports depends on the backend service's @@ -3745,12 +3820,14 @@ type Backend struct { // Partial URLs are not supported. Group string `json:"group,omitempty"` - // MaxConnections: Defines a maximum target for simultaneous connections - // for the entire backend (instance group or NEG). If the backend's - // balancingMode is UTILIZATION, this is an optional parameter. If the - // backend's balancingMode is CONNECTION, and backend is attached to a - // backend service whose loadBalancingScheme is EXTERNAL, you must - // specify either this parameter, maxConnectionsPerInstance, or + // MaxConnections: Defines a target maximum number of simultaneous + // connections that the backend can handle. Valid for network endpoint + // group and instance group backends (except for regional managed + // instance groups). If the backend's balancingMode is UTILIZATION, this + // is an optional parameter. If the backend's balancingMode is + // CONNECTION, and backend is attached to a backend service whose + // loadBalancingScheme is EXTERNAL, you must specify either this + // parameter, maxConnectionsPerInstance, or // maxConnectionsPerEndpoint. // // Not available if the backend's balancingMode is RATE. If the @@ -3759,13 +3836,13 @@ type Backend struct { // CONNECTION. MaxConnections int64 `json:"maxConnections,omitempty"` - // MaxConnectionsPerEndpoint: Defines a maximum target for simultaneous - // connections for an endpoint of a NEG. This is multiplied by the - // number of endpoints in the NEG to implicitly calculate a maximum - // number of target maximum simultaneous connections for the NEG. If the - // backend's balancingMode is CONNECTION, and the backend is attached to - // a backend service whose loadBalancingScheme is EXTERNAL, you must - // specify either this parameter, maxConnections, or + // MaxConnectionsPerEndpoint: Defines a target maximum number of + // simultaneous connections for an endpoint of a NEG. This is multiplied + // by the number of endpoints in the NEG to implicitly calculate a + // maximum number of target maximum simultaneous connections for the + // NEG. If the backend's balancingMode is CONNECTION, and the backend is + // attached to a backend service whose loadBalancingScheme is EXTERNAL, + // you must specify either this parameter, maxConnections, or // maxConnectionsPerInstance. // // Not available if the backend's balancingMode is RATE. Internal @@ -3774,10 +3851,10 @@ type Backend struct { // balancing mode of CONNECTION. MaxConnectionsPerEndpoint int64 `json:"maxConnectionsPerEndpoint,omitempty"` - // MaxConnectionsPerInstance: Defines a maximum target for simultaneous - // connections for a single VM in a backend instance group. This is - // multiplied by the number of instances in the instance group to - // implicitly calculate a target maximum number of simultaneous + // MaxConnectionsPerInstance: Defines a target maximum number of + // simultaneous connections for a single VM in a backend instance group. + // This is multiplied by the number of instances in the instance group + // to implicitly calculate a target maximum number of simultaneous // connections for the whole instance group. If the backend's // balancingMode is UTILIZATION, this is an optional parameter. If the // backend's balancingMode is CONNECTION, and backend is attached to a @@ -3791,12 +3868,17 @@ type Backend struct { // balancing mode of CONNECTION. 
MaxConnectionsPerInstance int64 `json:"maxConnectionsPerInstance,omitempty"` - // MaxRate: The max requests per second (RPS) of the group. Can be used - // with either RATE or UTILIZATION balancing modes, but required if RATE - // mode. For RATE mode, either maxRate or maxRatePerInstance must be - // set. + // MaxRate: Defines a maximum number of HTTP requests per second (RPS) + // that the backend can handle. Valid for network endpoint group and + // instance group backends (except for regional managed instance + // groups). Must not be defined if the backend is a managed instance + // group that uses autoscaling based on load balancing. // - // This cannot be used for internal load balancing. + // If the backend's balancingMode is UTILIZATION, this is an optional + // parameter. If the backend's balancingMode is RATE, you must specify + // maxRate, maxRatePerInstance, or maxRatePerEndpoint. + // + // Not available if the backend's balancingMode is CONNECTION. MaxRate int64 `json:"maxRate,omitempty"` // MaxRatePerEndpoint: Defines a maximum target for requests per second @@ -3805,7 +3887,8 @@ type Backend struct { // for the NEG. // // If the backend's balancingMode is RATE, you must specify either this - // parameter, maxRate, or maxRatePerInstance. + // parameter, maxRate (except for regional managed instance groups), or + // maxRatePerInstance. // // Not available if the backend's balancingMode is CONNECTION. MaxRatePerEndpoint float64 `json:"maxRatePerEndpoint,omitempty"` @@ -3817,18 +3900,20 @@ type Backend struct { // // If the backend's balancingMode is UTILIZATION, this is an optional // parameter. If the backend's balancingMode is RATE, you must specify - // either this parameter, maxRate, or maxRatePerEndpoint. + // either this parameter, maxRate (except for regional managed instance + // groups), or maxRatePerEndpoint. // // Not available if the backend's balancingMode is CONNECTION. MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` - // MaxUtilization: Defines the maximum average CPU utilization of a + // MaxUtilization: Defines the maximum average backend utilization of a // backend VM in an instance group. The valid range is [0.0, 1.0]. This // is an optional parameter if the backend's balancingMode is // UTILIZATION. // // This parameter can be used in conjunction with maxRate, - // maxRatePerInstance, maxConnections, or maxConnectionsPerInstance. + // maxRatePerInstance, maxConnections (except for regional managed + // instance groups), or maxConnectionsPerInstance. MaxUtilization float64 `json:"maxUtilization,omitempty"` // ForceSendFields is a list of field names (e.g. "BalancingMode") to @@ -4144,10 +4229,18 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // A backend service contains configuration values for Google Cloud // Platform load balancing services. // +// Backend services in Google Compute Engine can be either regionally or +// globally scoped. +// +// * +// [Global](/compute/docs/reference/rest/{$api_version}/backendServices) +// * +// [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendSe +// rvices) +// // For more information, read Backend Services. // -// (== resource_for v1.backendService ==) (== resource_for -// beta.backendService ==) +// (== resource_for {$api_version}.backendService ==) type BackendService struct { // AffinityCookieTtlSec: If set to 0, the cookie is non-persistent and // lasts only until the end of the browser session (or equivalent). 
The @@ -4208,6 +4301,11 @@ type BackendService struct { // HTTP or HTTPS. EnableCDN bool `json:"enableCDN,omitempty"` + // FailoverPolicy: Applicable only to Failover for Internal TCP/UDP Load + // Balancing. Requires at least one backend instance group to be defined + // as a backup (failover) backend. + FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This // field will be ignored when inserting a BackendService. An up-to-date @@ -4218,15 +4316,13 @@ type BackendService struct { // BackendService. Fingerprint string `json:"fingerprint,omitempty"` - // HealthChecks: The list of URLs to the HttpHealthCheck or - // HttpsHealthCheck resource for health checking this BackendService. - // Currently at most one health check can be specified, and a health - // check is required for Compute Engine backend services. A health check - // must not be specified for App Engine backend and Cloud Function - // backend. - // - // For internal load balancing, a URL to a HealthCheck resource must be - // specified instead. + // HealthChecks: The list of URLs to the healthChecks, httpHealthChecks + // (legacy), or httpsHealthChecks (legacy) resource for health checking + // this backend service. Not all backend services support legacy health + // checks. See Load balancer guide. Currently at most one health check + // can be specified. Backend services with instance group or zonal NEG + // backends must have a health check. Backend services with internet NEG + // backends must not have a health check. A health check must HealthChecks []string `json:"healthChecks,omitempty"` Iap *BackendServiceIAP `json:"iap,omitempty"` @@ -4283,6 +4379,9 @@ type BackendService struct { // - A global backend service with the load_balancing_scheme set to // INTERNAL_SELF_MANAGED. // + // If sessionAffinity is not NONE, and this field is not set to >MAGLEV + // or RING_HASH, session affinity settings will not take effect. + // // Possible values: // "INVALID_LB_POLICY" // "LEAST_REQUEST" @@ -4293,6 +4392,11 @@ type BackendService struct { // "ROUND_ROBIN" LocalityLbPolicy string `json:"localityLbPolicy,omitempty"` + // LogConfig: This field denotes the logging options for the load + // balancer traffic served by this backend service. If logging is + // enabled, logs will be exported to Stackdriver. + LogConfig *BackendServiceLogConfig `json:"logConfig,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -4302,6 +4406,11 @@ type BackendService struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // Network: The URL of the network to which this backend service + // belongs. This field can only be spcified when the load balancing + // scheme is set to INTERNAL. + Network string `json:"network,omitempty"` + // OutlierDetection: Settings controlling the eviction of unhealthy // hosts from the load balancing pool for the backend service. If not // set, this feature is considered disabled. @@ -4323,10 +4432,10 @@ type BackendService struct { // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. 
Required - // when the loadBalancingScheme is EXTERNAL and the backends are - // instance groups. The named port must be defined on each backend - // instance group. This parameter has no meaning if the backends are - // NEGs. + // when the loadBalancingScheme is EXTERNAL, INTERNAL_MANAGED, or + // INTERNAL_SELF_MANAGED and the backends are instance groups. The named + // port must be defined on each backend instance group. This parameter + // has no meaning if the backends are NEGs. // // // @@ -4337,7 +4446,7 @@ type BackendService struct { // Protocol: The protocol this BackendService uses to communicate with // backends. // - // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP, depending + // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP. depending // on the chosen load balancer or Traffic Director configuration. Refer // to the documentation for the load balancer or for Traffic Director // for more information. @@ -4375,9 +4484,9 @@ type BackendService struct { // When the loadBalancingScheme is INTERNAL, possible values are NONE, // CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. // - // When the loadBalancingScheme is INTERNAL_SELF_MANAGED, possible - // values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or - // HTTP_COOKIE. + // When the loadBalancingScheme is INTERNAL_SELF_MANAGED, or + // INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, + // GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE. // // Possible values: // "CLIENT_IP" @@ -4623,6 +4732,79 @@ func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceFailoverPolicy: Applicable only to Failover for +// Internal TCP/UDP Load Balancing. On failover or failback, this field +// indicates whether connection draining will be honored. GCP has a +// fixed connection draining timeout of 10 minutes. A setting of true +// terminates existing TCP connections to the active pool during +// failover and failback, immediately draining traffic. A setting of +// false allows existing TCP connections to persist, even on VMs no +// longer in the active pool, for up to the duration of the connection +// draining timeout (10 minutes). +type BackendServiceFailoverPolicy struct { + // DisableConnectionDrainOnFailover: This can be set to true only if the + // protocol is TCP. + // + // The default is false. + DisableConnectionDrainOnFailover bool `json:"disableConnectionDrainOnFailover,omitempty"` + + // DropTrafficIfUnhealthy: Applicable only to Failover for Internal + // TCP/UDP Load Balancing. If set to true, connections to the load + // balancer are dropped when all primary and all backup backend VMs are + // unhealthy. If set to false, connections are distributed among all + // primary VMs when all primary and all backup backend VMs are + // unhealthy. + // + // The default is false. + DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` + + // FailoverRatio: Applicable only to Failover for Internal TCP/UDP Load + // Balancing. The value of the field must be in the range [0, 1]. If the + // value is 0, the load balancer performs a failover when the number of + // healthy primary VMs equals zero. For all other values, the load + // balancer performs a failover when the total number of healthy primary + // VMs is less than this ratio. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
+ // "DisableConnectionDrainOnFailover") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "DisableConnectionDrainOnFailover") to include in API requests with + // the JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceFailoverPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceFailoverPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { + type NoMethod BackendServiceFailoverPolicy + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + type BackendServiceGroupHealth struct { // HealthStatus: Health state of the backend instances or endpoints in // requested instance or network endpoint group, determined based on @@ -4851,6 +5033,57 @@ func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceLogConfig: The available logging options for the load +// balancer traffic served by this backend service. +type BackendServiceLogConfig struct { + // Enable: This field denotes whether to enable logging for the load + // balancer traffic served by this backend service. + Enable bool `json:"enable,omitempty"` + + // SampleRate: This field can only be specified if logging is enabled + // for this backend service. The value of the field must be in [0, 1]. + // This configures the sampling rate of requests to the load balancer + // where 1.0 means all logged requests are reported and 0.0 means no + // logged requests are reported. The default value is 1.0. + SampleRate float64 `json:"sampleRate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendServiceLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *BackendServiceLogConfig) UnmarshalJSON(data []byte) error { + type NoMethod BackendServiceLogConfig + var s1 struct { + SampleRate gensupport.JSONFloat64 `json:"sampleRate"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.SampleRate = float64(s1.SampleRate) + return nil +} + type BackendServiceReference struct { BackendService string `json:"backendService,omitempty"` @@ -5042,6 +5275,26 @@ type Binding struct { // * `group:{emailid}`: An email address that represents a Google group. // For example, `admins@example.com`. // + // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. + // + // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. + // + // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. + // If the group is recovered, this value reverts to `group:{emailid}` + // and the recovered group retains the role in the binding. + // // // // * `domain:{domain}`: The G Suite domain (primary) that represents all @@ -5213,8 +5466,7 @@ func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { // committed use contract with an explicit start and end time. You can // create commitments based on vCPUs and memory usage and receive // discounted rates. For full details, read Signing Up for Committed Use -// Discounts. (== resource_for beta.regionCommitments ==) (== -// resource_for v1.regionCommitments ==) +// Discounts. (== resource_for {$api_version}.regionCommitments ==) type Commitment struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -5977,8 +6229,8 @@ type CorsPolicy struct { // Access-Control-Expose-Headers header. ExposeHeaders []string `json:"exposeHeaders,omitempty"` - // MaxAge: Specifies how long the results of a preflight request can be - // cached. This translates to the content for the Access-Control-Max-Age + // MaxAge: Specifies how long results of a preflight request can be + // cached in seconds. This translates to the Access-Control-Max-Age // header. MaxAge int64 `json:"maxAge,omitempty"` @@ -6012,6 +6264,11 @@ type CustomerEncryptionKey struct { // Cloud KMS. KmsKeyName string `json:"kmsKeyName,omitempty"` + // KmsKeyServiceAccount: The service account being used for the + // encryption request for the given KMS key. If absent, the Compute + // Engine default service account is used. 
+ KmsKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"` + // RawKey: Specifies a 256-bit customer-supplied encryption key, encoded // in RFC 4648 base64 to either encrypt or decrypt this resource. RawKey string `json:"rawKey,omitempty"` @@ -6142,7 +6399,13 @@ func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { // Disk: Represents a Persistent Disk resource. // -// Persistent disks are required for running your VM instances. Create +// Google Compute Engine has two Disk resources: +// +// * [Zonal](/compute/docs/reference/rest/{$api_version}/disks) * +// [Regional](/compute/docs/reference/rest/{$api_version}/regionDisks) +// +// P +// ersistent disks are required for running your VM instances. Create // both boot and non-boot (data) persistent disks. For more information, // read Persistent Disks. For more storage options, read Storage // options. @@ -6152,8 +6415,8 @@ func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { // // The regionDisks resource represents a regional persistent disk. For // more information, read Regional resources. (== resource_for -// beta.disks ==) (== resource_for v1.disks ==) (== resource_for -// v1.regionDisks ==) (== resource_for beta.regionDisks ==) +// {$api_version}.disks ==) (== resource_for {$api_version}.regionDisks +// ==) type Disk struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -6812,6 +7075,13 @@ func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { // DiskType: Represents a Disk Type resource. // +// Google Compute Engine has two Disk Type resources: +// +// * +// [Regional](/compute/docs/reference/rest/{$api_version}/regionDiskTypes +// ) * +// [Zonal](/compute/docs/reference/rest/{$api_version}/diskTypes) +// // You can choose from a variety of disk types based on your needs. For // more information, read Storage options. // @@ -6820,9 +7090,8 @@ func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { // // The regionDiskTypes resource represents disk types for a regional // persistent disk. For more information, read Regional persistent -// disks. (== resource_for beta.diskTypes ==) (== resource_for -// v1.diskTypes ==) (== resource_for v1.regionDiskTypes ==) (== -// resource_for beta.regionDiskTypes ==) +// disks. (== resource_for {$api_version}.diskTypes ==) (== resource_for +// {$api_version}.regionDiskTypes ==) type DiskType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -7681,165 +7950,33 @@ func (s *Duration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Expr: Represents an expression text. Example: -// -// title: "User account presence" description: "Determines whether the -// request has a user account" expression: "size(request.user) > 0" -type Expr struct { - // Description: An optional description of the expression. This is a - // longer text which describes the expression, e.g. when hovered over it - // in a UI. - Description string `json:"description,omitempty"` - - // Expression: Textual representation of an expression in Common - // Expression Language syntax. - // - // The application context of the containing message determines which - // well-known feature set of CEL is supported. - Expression string `json:"expression,omitempty"` - - // Location: An optional string indicating the location of the - // expression for error reporting, e.g. a file name and a position in - // the file. 
- Location string `json:"location,omitempty"` - - // Title: An optional title for the expression, i.e. a short string - // describing its purpose. This can be used e.g. in UIs which allow to - // enter the expression. - Title string `json:"title,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Expr) MarshalJSON() ([]byte, error) { - type NoMethod Expr - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ExternalVpnGateway: External VPN gateway is the on-premises VPN -// gateway(s) or another cloud provider?s VPN gateway that connects to -// your Google Cloud VPN gateway. To create a highly available VPN from -// Google Cloud to your on-premises side or another Cloud provider's VPN -// gateway, you must create a external VPN gateway resource in GCP, -// which provides the information to GCP about your external VPN -// gateway. -type ExternalVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Interfaces: List of interfaces for this external VPN gateway. - Interfaces []*ExternalVpnGatewayInterface `json:"interfaces,omitempty"` +type ExchangedPeeringRoute struct { + // DestRange: The destination range of the route. + DestRange string `json:"destRange,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#externalVpnGateway for externalVpnGateways. - Kind string `json:"kind,omitempty"` + // Imported: True if the peering route has been imported from a peer. + // The actual import happens if the field + // networkPeering.importCustomRoutes is true for this network, and + // networkPeering.exportCustomRoutes is true for the peer network, and + // the import does not result in a route conflict. + Imported bool `json:"imported,omitempty"` - // LabelFingerprint: A fingerprint for the labels being applied to this - // ExternalVpnGateway, which is essentially a hash of the labels set - // used for optimistic locking. The fingerprint is initially generated - // by Compute Engine and changes after every request to modify or update - // labels. You must always provide an up-to-date fingerprint hash in - // order to update or change labels, otherwise the request will fail - // with error 412 conditionNotMet. 
- // - // To see the latest fingerprint, make a get() request to retrieve an - // ExternalVpnGateway. - LabelFingerprint string `json:"labelFingerprint,omitempty"` + // NextHopRegion: The region of peering route next hop, only applies to + // dynamic routes. + NextHopRegion string `json:"nextHopRegion,omitempty"` - // Labels: Labels to apply to this ExternalVpnGateway resource. These - // can be later modified by the setLabels method. Each label key/value - // must comply with RFC1035. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // Priority: The priority of the peering route. + Priority int64 `json:"priority,omitempty"` - // RedundancyType: Indicates the user-supplied redundancy type of this - // external VPN gateway. + // Type: The type of the peering route. // // Possible values: - // "FOUR_IPS_REDUNDANCY" - // "SINGLE_IP_INTERNALLY_REDUNDANT" - // "TWO_IPS_REDUNDANCY" - RedundancyType string `json:"redundancyType,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ExternalVpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod ExternalVpnGateway - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ExternalVpnGatewayInterface: The interface for the external VPN -// gateway. -type ExternalVpnGatewayInterface struct { - // Id: The numeric ID of this interface. The allowed input values for - // this id for different redundancy types of external VPN gateway: - // SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 - // FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 - Id int64 `json:"id,omitempty"` - - // IpAddress: IP address of the interface in the external VPN gateway. - // Only IPv4 is supported. This IP address can be either from your - // on-premise gateway or another Cloud provider?s VPN gateway, it cannot - // be an IP address from Google Compute Engine. 
- IpAddress string `json:"ipAddress,omitempty"` + // "DYNAMIC_PEERING_ROUTE" + // "STATIC_PEERING_ROUTE" + // "SUBNET_PEERING_ROUTE" + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "DestRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -7847,8 +7984,8 @@ type ExternalVpnGatewayInterface struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "DestRange") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -7856,26 +7993,23 @@ type ExternalVpnGatewayInterface struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { - type NoMethod ExternalVpnGatewayInterface +func (s *ExchangedPeeringRoute) MarshalJSON() ([]byte, error) { + type NoMethod ExchangedPeeringRoute raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ExternalVpnGatewayList: Response to the list request, and contains a -// list of externalVpnGateways. -type ExternalVpnGatewayList struct { - Etag string `json:"etag,omitempty"` - +type ExchangedPeeringRoutesList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of ExternalVpnGateway resources. - Items []*ExternalVpnGateway `json:"items,omitempty"` + // Items: A list of ExchangedPeeringRoute resources. + Items []*ExchangedPeeringRoute `json:"items,omitempty"` // Kind: [Output Only] Type of resource. Always - // compute#externalVpnGatewayList for lists of externalVpnGateways. + // compute#exchangedPeeringRoutesList for exchanged peering routes + // lists. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -7890,13 +8024,13 @@ type ExternalVpnGatewayList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *ExternalVpnGatewayListWarning `json:"warning,omitempty"` + Warning *ExchangedPeeringRoutesListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Etag") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -7904,7 +8038,7 @@ type ExternalVpnGatewayList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Etag") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -7913,15 +8047,379 @@ type ExternalVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { - type NoMethod ExternalVpnGatewayList +func (s *ExchangedPeeringRoutesList) MarshalJSON() ([]byte, error) { + type NoMethod ExchangedPeeringRoutesList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ExternalVpnGatewayListWarning: [Output Only] Informational warning -// message. -type ExternalVpnGatewayListWarning struct { +// ExchangedPeeringRoutesListWarning: [Output Only] Informational +// warning message. +type ExchangedPeeringRoutesListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ExchangedPeeringRoutesListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExchangedPeeringRoutesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ExchangedPeeringRoutesListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ExchangedPeeringRoutesListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExchangedPeeringRoutesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ExchangedPeeringRoutesListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Expr: Represents a textual expression in the Common Expression +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. +// +// Example (Comparison): +// +// title: "Summary size limit" description: "Determines if a summary is +// less than 100 chars" expression: "document.summary.size() < +// 100" +// +// Example (Equality): +// +// title: "Requestor is owner" description: "Determines if requestor is +// the document owner" expression: "document.owner == +// request.auth.claims.email" +// +// Example (Logic): +// +// title: "Public documents" description: "Determine whether the +// document should be publicly visible" expression: "document.type != +// 'private' && document.type != 'internal'" +// +// Example (Data Manipulation): +// +// title: "Notification string" description: "Create a notification +// string with a timestamp." expression: "'New message received at ' + +// string(document.create_time)" +// +// The exact variables and functions that may be referenced within an +// expression are determined by the service that evaluates it. See the +// service documentation for additional information. +type Expr struct { + // Description: Optional. Description of the expression. This is a + // longer text which describes the expression, e.g. when hovered over it + // in a UI. + Description string `json:"description,omitempty"` + + // Expression: Textual representation of an expression in Common + // Expression Language syntax. + Expression string `json:"expression,omitempty"` + + // Location: Optional. String indicating the location of the expression + // for error reporting, e.g. a file name and a position in the file. + Location string `json:"location,omitempty"` + + // Title: Optional. 
Title for the expression, i.e. a short string + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Expr) MarshalJSON() ([]byte, error) { + type NoMethod Expr + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGateway: External VPN gateway is the on-premises VPN +// gateway(s) or another cloud provider's VPN gateway that connects to +// your Google Cloud VPN gateway. To create a highly available VPN from +// Google Cloud to your on-premises side or another Cloud provider's VPN +// gateway, you must create a external VPN gateway resource in GCP, +// which provides the information to GCP about your external VPN +// gateway. +type ExternalVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Interfaces: List of interfaces for this external VPN gateway. + Interfaces []*ExternalVpnGatewayInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#externalVpnGateway for externalVpnGateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // ExternalVpnGateway, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // ExternalVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this ExternalVpnGateway resource. These + // can be later modified by the setLabels method. Each label key/value + // must comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. 
The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // RedundancyType: Indicates the user-supplied redundancy type of this + // external VPN gateway. + // + // Possible values: + // "FOUR_IPS_REDUNDANCY" + // "SINGLE_IP_INTERNALLY_REDUNDANT" + // "TWO_IPS_REDUNDANCY" + RedundancyType string `json:"redundancyType,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayInterface: The interface for the external VPN +// gateway. +type ExternalVpnGatewayInterface struct { + // Id: The numeric ID of this interface. The allowed input values for + // this id for different redundancy types of external VPN gateway: + // SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 + // FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 + Id int64 `json:"id,omitempty"` + + // IpAddress: IP address of the interface in the external VPN gateway. + // Only IPv4 is supported. This IP address can be either from your + // on-premise gateway or another Cloud provider's VPN gateway, it cannot + // be an IP address from Google Compute Engine. + IpAddress string `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayInterface + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayList: Response to the list request, and contains a +// list of externalVpnGateways. +type ExternalVpnGatewayList struct { + Etag string `json:"etag,omitempty"` + + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of ExternalVpnGateway resources. + Items []*ExternalVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#externalVpnGatewayList for lists of externalVpnGateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ExternalVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayListWarning: [Output Only] Informational warning +// message. +type ExternalVpnGatewayListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -8021,6 +8519,39 @@ func (s *ExternalVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type FileContentBuffer struct { + // Content: The raw content in the secure keys file. 
+ Content string `json:"content,omitempty"` + + // Possible values: + // "BIN" + // "UNDEFINED" + // "X509" + FileType string `json:"fileType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Content") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Content") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FileContentBuffer) MarshalJSON() ([]byte, error) { + type NoMethod FileContentBuffer + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Firewall: Represents a Firewall Rule resource. // // Firewall rules allow or deny ingress traffic to, and egress traffic @@ -8504,6 +9035,15 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // ForwardingRule: Represents a Forwarding Rule resource. // +// Forwarding rule resources in GCP can be either regional or global in +// scope: +// +// * +// [Global](/compute/docs/reference/rest/{$api_version}/globalForwardingR +// ules) * +// [Regional](/compute/docs/reference/rest/{$api_version}/forwardingRules +// ) +// // A forwarding rule and its corresponding IP address represent the // frontend configuration of a Google Cloud Platform load balancer. // Forwarding rules can also reference target instances and Cloud VPN @@ -8512,11 +9052,9 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // For more information, read Forwarding rule concepts and Using // protocol forwarding. // -// (== resource_for beta.forwardingRules ==) (== resource_for -// v1.forwardingRules ==) (== resource_for beta.globalForwardingRules -// ==) (== resource_for v1.globalForwardingRules ==) (== resource_for -// beta.regionForwardingRules ==) (== resource_for -// v1.regionForwardingRules ==) +// (== resource_for {$api_version}.forwardingRules ==) (== resource_for +// {$api_version}.globalForwardingRules ==) (== resource_for +// {$api_version}.regionForwardingRules ==) type ForwardingRule struct { // IPAddress: IP address that this forwarding rule serves. When a client // sends traffic to this IP address, the forwarding rule directs the @@ -8535,8 +9073,8 @@ type ForwardingRule struct { // ss_specifications). IPAddress string `json:"IPAddress,omitempty"` - // IPProtocol: The IP protocol to which this rule applies. Valid options - // are TCP, UDP, ESP, AH, SCTP or ICMP. + // IPProtocol: The IP protocol to which this rule applies. For protocol + // forwarding, valid options are TCP, UDP, ESP, AH, SCTP or ICMP. // // For Internal TCP/UDP Load Balancing, the load balancing scheme is // INTERNAL, and one of TCP or UDP are valid. For Traffic Director, the @@ -8566,6 +9104,13 @@ type ForwardingRule struct { // forwarded to the backends configured with this forwarding rule. 
AllPorts bool `json:"allPorts,omitempty"` + // AllowGlobalAccess: This field is used along with the backend_service + // field for internal load balancing or with the target field for + // internal TargetInstance. If the field is set to TRUE, clients can + // access ILB from all regions. Otherwise only allows access from + // clients in the same region as the internal load balancer. + AllowGlobalAccess bool `json:"allowGlobalAccess,omitempty"` + // BackendService: This field is only used for INTERNAL load // balancing. // @@ -8581,6 +9126,16 @@ type ForwardingRule struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a ForwardingRule. Include the + // fingerprint in patch request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + // + // To see the latest fingerprint, make a get() request to retrieve a + // ForwardingRule. + Fingerprint string `json:"fingerprint,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -8595,22 +9150,33 @@ type ForwardingRule struct { // "UNSPECIFIED_VERSION" IpVersion string `json:"ipVersion,omitempty"` + // IsMirroringCollector: Indicates whether or not this load balancer can + // be used as a collector for packet mirroring. To prevent mirroring + // loops, instances behind this load balancer will not have their + // traffic mirrored even if a PacketMirroring rule applies to them. This + // can only be set to true for load balancers that have their + // loadBalancingScheme set to INTERNAL. + IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` + // Kind: [Output Only] Type of the resource. Always // compute#forwardingRule for Forwarding Rule resources. Kind string `json:"kind,omitempty"` - // LoadBalancingScheme: Specifies the forwarding rule type. EXTERNAL is - // used for: - Classic Cloud VPN gateways - Protocol forwarding to VMs - // from an external IP address - The following load balancers: HTTP(S), - // SSL Proxy, TCP Proxy, and Network TCP/UDP. - // - // INTERNAL is used for: - Protocol forwarding to VMs from an internal - // IP address - Internal TCP/UDP load balancers + // LoadBalancingScheme: Specifies the forwarding rule type. // - // INTERNAL_MANAGED is used for: - Internal HTTP(S) load - // balancers // - // INTERNAL_SELF_MANAGED is used for: - Traffic Director + // - EXTERNAL is used for: + // - Classic Cloud VPN gateways + // - Protocol forwarding to VMs from an external IP address + // - The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and + // Network TCP/UDP + // - INTERNAL is used for: + // - Protocol forwarding to VMs from an internal IP address + // - Internal TCP/UDP load balancers + // - INTERNAL_MANAGED is used for: + // - Internal HTTP(S) load balancers + // - INTERNAL_SELF_MANAGED is used for: + // - Traffic Director // // For more information about forwarding rules, refer to Forwarding rule // concepts. 
@@ -8624,18 +9190,21 @@ type ForwardingRule struct { LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` // MetadataFilters: Opaque filter criteria used by Loadbalancer to - // restrict routing configuration to a limited set xDS compliant + // restrict routing configuration to a limited set of xDS compliant // clients. In their xDS requests to Loadbalancer, xDS clients present - // node metadata. If a match takes place, the relevant routing - // configuration is made available to those proxies. + // node metadata. If a match takes place, the relevant configuration is + // made available to those proxies. Otherwise, all the resources (e.g. + // TargetHttpProxy, UrlMap) referenced by the ForwardingRule will not be + // visible to those proxies. // For each metadataFilter in this list, if its filterMatchCriteria is // set to MATCH_ANY, at least one of the filterLabels must match the // corresponding label provided in the metadata. If its // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels - // must match with corresponding labels in the provided + // must match with corresponding labels provided in the // metadata. - // metadataFilters specified here can be overridden by those specified - // in the UrlMap that this ForwardingRule references. + // metadataFilters specified here will be applifed before those + // specified in the UrlMap that this ForwardingRule + // references. // metadataFilters only applies to Loadbalancers that have their // loadBalancingScheme set to INTERNAL_SELF_MANAGED. MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` @@ -8658,7 +9227,7 @@ type ForwardingRule struct { Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring - // this load balancer and can only take the following values: PREMIUM , + // this load balancer and can only take the following values: PREMIUM, // STANDARD. // // For regional ForwardingRule, the valid values are PREMIUM and @@ -8673,24 +9242,21 @@ type ForwardingRule struct { // "STANDARD" NetworkTier string `json:"networkTier,omitempty"` - // PortRange: This field is deprecated. See the port - // field. - PortRange string `json:"portRange,omitempty"` - - // Ports: List of comma-separated ports. The forwarding rule forwards - // packets with matching destination ports. If the forwarding rule's - // loadBalancingScheme is EXTERNAL, and the forwarding rule references a - // target pool, specifying ports is optional. You can specify an - // unlimited number of ports, but they must be contiguous. If you omit - // ports, GCP forwards traffic on any port of the forwarding rule's - // protocol. - // - // If the forwarding rule's loadBalancingScheme is EXTERNAL, and the - // forwarding rule references a target HTTP proxy, target HTTPS proxy, - // target TCP proxy, target SSL proxy, or target VPN gateway, you must - // specify ports using the following constraints: + // PortRange: When the load balancing scheme is EXTERNAL, + // INTERNAL_SELF_MANAGED and INTERNAL_MANAGED, you can specify a + // port_range. Use with a forwarding rule that points to a target proxy + // or a target pool. Do not use with a forwarding rule that points to a + // backend service. This field is used along with the target field for + // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, + // TargetVpnGateway, TargetPool, TargetInstance. 
// + // Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets + // addressed to ports in the specified range will be forwarded to + // target. Forwarding rules with the same [IPAddress, IPProtocol] pair + // must have disjoint port ranges. // + // Some types of forwarding target have constraints on the acceptable + // ports: // - TargetHttpProxy: 80, 8080 // - TargetHttpsProxy: 443 // - TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, @@ -8698,21 +9264,22 @@ type ForwardingRule struct { // - TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, // 995, 1688, 1883, 5222 // - TargetVpnGateway: 500, 4500 + PortRange string `json:"portRange,omitempty"` + + // Ports: This field is used along with the backend_service field for + // internal load balancing. // - // If the forwarding rule's loadBalancingScheme is INTERNAL, you must + // When the load balancing scheme is INTERNAL, a list of ports can be + // configured, for example, ['80'], ['8000','9000']. Only packets + // addressed to these ports are forwarded to the backends configured + // with the forwarding rule. + // + // If the forwarding rule's loadBalancingScheme is INTERNAL, you can // specify ports in one of the following ways: // // * A list of up to five ports, which can be non-contiguous * Keyword // ALL, which causes the forwarding rule to forward traffic on any port // of the forwarding rule's protocol. - // - // The ports field is used along with the target field for - // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, - // TargetVpnGateway, TargetPool, TargetInstance. - // - // Applicable only when IPProtocol is TCP, UDP, or SCTP. Forwarding - // rules with the same [IPAddress, IPProtocol] pair must have disjoint - // port ranges. Ports []string `json:"ports,omitempty"` // Region: [Output Only] URL of the region where the regional forwarding @@ -8760,8 +9327,8 @@ type ForwardingRule struct { // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are - // valid. + // INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy is valid, + // not targetHttpsProxy. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9262,6 +9829,62 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GlobalNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GlobalNetworkEndpointGroupsAttachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GlobalNetworkEndpointGroupsDetachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be detached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GlobalNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GlobalNetworkEndpointGroupsDetachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type GlobalSetLabelsRequest struct { // LabelFingerprint: The fingerprint of the previous set of labels for // this resource, used to detect conflicts. The fingerprint is initially @@ -9351,7 +9974,7 @@ type GuestAttributes struct { Kind string `json:"kind,omitempty"` // QueryPath: The path to be queried. This can be the default namespace - // ('/') or a nested namespace ('//') or a specified key ('//') + // ('/') or a nested namespace ('/\/') or a specified key ('/\/\') QueryPath string `json:"queryPath,omitempty"` // QueryValue: [Output Only] The value of the requested queried path. @@ -9508,24 +10131,18 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how port is selected for health // checking, can be one of following values: - // USE_FIXED_PORT: The port number in - // port - // is used for health checking. - // USE_NAMED_PORT: The - // portName - // is used for health checking. + // USE_FIXED_PORT: The port number in port is used for health + // checking. + // USE_NAMED_PORT: The portName is used for health + // checking. // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for // each network endpoint is used for health checking. For other // backends, the port or named port specified in the Backend Service is // used for health checking. // // - // If not specified, HTTP2 health check follows behavior specified - // in - // port - // and - // portName - // fields. + // If not specified, HTTP2 health check follows behavior specified in + // port and portName fields. 
// // Possible values: // "USE_FIXED_PORT" @@ -9590,24 +10207,18 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how port is selected for health // checking, can be one of following values: - // USE_FIXED_PORT: The port number in - // port - // is used for health checking. - // USE_NAMED_PORT: The - // portName - // is used for health checking. + // USE_FIXED_PORT: The port number in port is used for health + // checking. + // USE_NAMED_PORT: The portName is used for health + // checking. // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for // each network endpoint is used for health checking. For other // backends, the port or named port specified in the Backend Service is // used for health checking. // // - // If not specified, HTTP health check follows behavior specified - // in - // port - // and - // portName - // fields. + // If not specified, HTTP health check follows behavior specified in + // port and portName fields. // // Possible values: // "USE_FIXED_PORT" @@ -9672,24 +10283,18 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how port is selected for health // checking, can be one of following values: - // USE_FIXED_PORT: The port number in - // port - // is used for health checking. - // USE_NAMED_PORT: The - // portName - // is used for health checking. + // USE_FIXED_PORT: The port number in port is used for health + // checking. + // USE_NAMED_PORT: The portName is used for health + // checking. // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for // each network endpoint is used for health checking. For other // backends, the port or named port specified in the Backend Service is // used for health checking. // // - // If not specified, HTTPS health check follows behavior specified - // in - // port - // and - // portName - // fields. + // If not specified, HTTPS health check follows behavior specified in + // port and portName fields. // // Possible values: // "USE_FIXED_PORT" @@ -9740,8 +10345,16 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // HealthCheck: Represents a Health Check resource. // -// Health checks are used for most GCP load balancers and managed -// instance group auto-healing. For more information, read Health Check +// Google Compute Engine has two Health Check resources: +// +// * [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) +// * +// [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChe +// cks) +// +// Internal HTTP(S) load balancers use regional health checks. All other +// types of GCP load balancers and managed instance group auto-healing +// use global health checks. For more information, read Health Check // Concepts. // // To perform health checks on network load balancers, you must use @@ -10332,10 +10945,11 @@ type HealthStatus struct { // Instance: URL of the instance resource. Instance string `json:"instance,omitempty"` - // IpAddress: The IP address represented by this resource. + // IpAddress: A forwarding rule IP address assigned to this instance. IpAddress string `json:"ipAddress,omitempty"` - // Port: The port on the instance. + // Port: The named port of the instance group, not necessarily the port + // that is health-checked. Port int64 `json:"port,omitempty"` // ForceSendFields is a list of field names (e.g. "HealthState") to @@ -10416,9 +11030,9 @@ type HostRule struct { Description string `json:"description,omitempty"` // Hosts: The list of host patterns to match. 
They must be valid - // hostnames, except * will match any string of ([a-z0-9-.]*). In that - // case, * must be the first character and must be followed in the - // pattern by either - or .. + // hostnames with optional port numbers in the format host:port. * + // matches any string of ([a-z0-9-.]*). In that case, * must be the + // first character and must be followed in the pattern by either - or .. Hosts []string `json:"hosts,omitempty"` // PathMatcher: The name of the PathMatcher to use to match the path @@ -10660,8 +11274,8 @@ type HttpHeaderMatch struct { PrefixMatch string `json:"prefixMatch,omitempty"` // PresentMatch: A header with the contents of headerName must exist. - // The match takes place whether or not the request's header has a value - // or not. + // The match takes place whether or not the request's header has a + // value. // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, // presentMatch or rangeMatch must be set. PresentMatch bool `json:"presentMatch,omitempty"` @@ -10676,16 +11290,20 @@ type HttpHeaderMatch struct { // - -3someString will not match. // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, // presentMatch or rangeMatch must be set. + // Note that rangeMatch is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. RangeMatch *Int64RangeMatch `json:"rangeMatch,omitempty"` - // RegexMatch: The value of the header must match the regualar - // expression specified in regexMatch. For regular expression grammar, - // please see: en.cppreference.com/w/cpp/regex/ecmascript + // RegexMatch: The value of the header must match the regular expression + // specified in regexMatch. For regular expression grammar, please see: + // en.cppreference.com/w/cpp/regex/ecmascript // For matching against a port specified in the HTTP request, use a // headerMatch with headerName set to PORT and a regular expression that // satisfies the RFC2616 Host header's port specifier. // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, // presentMatch or rangeMatch must be set. + // Note that regexMatch only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` // SuffixMatch: The value of the header must end with the contents of @@ -11008,7 +11626,7 @@ func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { type HttpQueryParameterMatch struct { // ExactMatch: The queryParameterMatch matches if the value of the // parameter exactly matches the contents of exactMatch. - // Only one of presentMatch, exactMatch and regexMatch must be set. + // Only one of presentMatch, exactMatch or regexMatch must be set. ExactMatch string `json:"exactMatch,omitempty"` // Name: The name of the query parameter to match. The query parameter @@ -11019,14 +11637,16 @@ type HttpQueryParameterMatch struct { // PresentMatch: Specifies that the queryParameterMatch matches if the // request contains the query parameter, irrespective of whether the // parameter has a value or not. - // Only one of presentMatch, exactMatch and regexMatch must be set. + // Only one of presentMatch, exactMatch or regexMatch must be set. PresentMatch bool `json:"presentMatch,omitempty"` // RegexMatch: The queryParameterMatch matches if the value of the // parameter matches the regular expression specified by regexMatch. 
For // the regular expression grammar, please see // en.cppreference.com/w/cpp/regex/ecmascript - // Only one of presentMatch, exactMatch and regexMatch must be set. + // Only one of presentMatch, exactMatch or regexMatch must be set. + // Note that regexMatch only applies when the loadBalancingScheme is set + // to INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` // ForceSendFields is a list of field names (e.g. "ExactMatch") to @@ -11069,13 +11689,19 @@ type HttpRedirectAction struct { // PathRedirect: The path that will be used in the redirect response // instead of the one that was supplied in the request. - // Only one of pathRedirect or prefixRedirect must be specified. + // pathRedirect cannot be supplied together with prefixRedirect. Supply + // one alone or neither. If neither is supplied, the path of the + // original request will be used for the redirect. // The value must be between 1 and 1024 characters. PathRedirect string `json:"pathRedirect,omitempty"` // PrefixRedirect: The prefix that replaces the prefixMatch specified in // the HttpRouteRuleMatch, retaining the remaining portion of the URL // before redirecting the request. + // prefixRedirect cannot be supplied together with pathRedirect. Supply + // one alone or neither. If neither is supplied, the path of the + // original request will be used for the redirect. + // The value must be between 1 and 1024 characters. PrefixRedirect string `json:"prefixRedirect,omitempty"` // RedirectResponseCode: The HTTP Status code to use for this @@ -11224,7 +11850,7 @@ type HttpRouteAction struct { Timeout *Duration `json:"timeout,omitempty"` // UrlRewrite: The spec to modify the URL of the request, prior to - // forwarding the request to the matched service + // forwarding the request to the matched service. UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` // WeightedBackendServices: A list of weighted backend services to send @@ -11303,7 +11929,8 @@ type HttpRouteRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. - // Only one of routeAction or urlRedirect must be set. + // Only one of urlRedirect, service or + // routeAction.weightedBackendService must be set. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -11349,11 +11976,11 @@ func (s *HttpRouteRule) MarshalJSON() ([]byte, error) { // for matching requests to an HttpRouteRule. All specified criteria // must be satisfied for a match to occur. type HttpRouteRuleMatch struct { - // FullPathMatch: For satifying the matchRule condition, the path of the - // request must exactly match the value specified in fullPathMatch after - // removing any query parameters and anchor that may be part of the - // original URL. - // FullPathMatch must be between 1 and 1024 characters. + // FullPathMatch: For satisfying the matchRule condition, the path of + // the request must exactly match the value specified in fullPathMatch + // after removing any query parameters and anchor that may be part of + // the original URL. + // fullPathMatch must be between 1 and 1024 characters. // Only one of prefixMatch, fullPathMatch or regexMatch must be // specified. 
FullPathMatch string `json:"fullPathMatch,omitempty"` @@ -11365,11 +11992,11 @@ type HttpRouteRuleMatch struct { // IgnoreCase: Specifies that prefixMatch and fullPathMatch matches are // case sensitive. // The default value is false. - // caseSensitive must not be used with regexMatch. + // ignoreCase must not be used with regexMatch. IgnoreCase bool `json:"ignoreCase,omitempty"` // MetadataFilters: Opaque filter criteria used by Loadbalancer to - // restrict routing configuration to a limited set xDS compliant + // restrict routing configuration to a limited set of xDS compliant // clients. In their xDS requests to Loadbalancer, xDS clients present // node metadata. If a match takes place, the relevant routing // configuration is made available to those proxies. @@ -11377,15 +12004,16 @@ type HttpRouteRuleMatch struct { // set to MATCH_ANY, at least one of the filterLabels must match the // corresponding label provided in the metadata. If its // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels - // must match with corresponding labels in the provided + // must match with corresponding labels provided in the // metadata. - // metadataFilters specified here can be overrides those specified in - // ForwardingRule that refers to this UrlMap. + // metadataFilters specified here will be applied after those specified + // in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch + // belongs to. // metadataFilters only applies to Loadbalancers that have their // loadBalancingScheme set to INTERNAL_SELF_MANAGED. MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` - // PrefixMatch: For satifying the matchRule condition, the request's + // PrefixMatch: For satisfying the matchRule condition, the request's // path must begin with the specified prefixMatch. prefixMatch must // begin with a /. // The value must be between 1 and 1024 characters. @@ -11398,13 +12026,15 @@ type HttpRouteRuleMatch struct { // the request. QueryParameterMatches []*HttpQueryParameterMatch `json:"queryParameterMatches,omitempty"` - // RegexMatch: For satifying the matchRule condition, the path of the + // RegexMatch: For satisfying the matchRule condition, the path of the // request must satisfy the regular expression specified in regexMatch // after removing any query parameters and anchor supplied with the // original URL. For regular expression grammar please see // en.cppreference.com/w/cpp/regex/ecmascript // Only one of prefixMatch, fullPathMatch or regexMatch must be // specified. + // Note that regexMatch only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` // ForceSendFields is a list of field names (e.g. "FullPathMatch") to @@ -11680,8 +12310,8 @@ func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { // Image: Represents an Image resource. // // You can use images to create boot disks for your VM instances. For -// more information, read Images. (== resource_for beta.images ==) (== -// resource_for v1.images ==) +// more information, read Images. (== resource_for {$api_version}.images +// ==) type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google // Cloud Storage (in bytes). @@ -11775,6 +12405,10 @@ type Image struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ShieldedInstanceInitialState: Set the secure boot keys of shielded + // instance. 
+ ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` + // SourceDisk: URL of the source disk used to create this image. This // can be a full or valid partial URL. You must provide either this // property or the rawDisk.source property but not both to create an @@ -12082,11 +12716,49 @@ func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InitialStateConfig: Initial State for shielded instance, these are +// public keys which are safe to store in public +type InitialStateConfig struct { + // Dbs: The Key Database (db). + Dbs []*FileContentBuffer `json:"dbs,omitempty"` + + // Dbxs: The forbidden key database (dbx). + Dbxs []*FileContentBuffer `json:"dbxs,omitempty"` + + // Keks: The Key Exchange Key (KEK). + Keks []*FileContentBuffer `json:"keks,omitempty"` + + // Pk: The Platform Key (PK). + Pk *FileContentBuffer `json:"pk,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Dbs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Dbs") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InitialStateConfig) MarshalJSON() ([]byte, error) { + type NoMethod InitialStateConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Instance: Represents an Instance resource. // // An instance is a virtual machine that is hosted on Google Cloud // Platform. For more information, read Virtual Machine Instances. (== -// resource_for beta.instances ==) (== resource_for v1.instances ==) +// resource_for {$api_version}.instances ==) type Instance struct { // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan @@ -12116,6 +12788,16 @@ type Instance struct { // DisplayDevice: Enables display device for the instance. DisplayDevice *DisplayDevice `json:"displayDevice,omitempty"` + // Fingerprint: Specifies a fingerprint for this resource, which is + // essentially a hash of the instance's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update the instance. You + // must always provide an up-to-date fingerprint hash in order to update + // the instance. + // + // To see the latest fingerprint, make get() request to the instance. + Fingerprint string `json:"fingerprint,omitempty"` + // GuestAccelerators: A list of the type and count of accelerator cards // attached to the instance. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` @@ -12203,6 +12885,9 @@ type Instance struct { // can consume from. 
ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // ResourcePolicies: Resource policies applied to this instance. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // Scheduling: Sets the scheduling options for this instance. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -12231,6 +12916,7 @@ type Instance struct { // SUSPENDING, SUSPENDED, and TERMINATED. // // Possible values: + // "DEPROVISIONING" // "PROVISIONING" // "REPAIRING" // "RUNNING" @@ -12441,18 +13127,25 @@ func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroup: Represents an unmanaged Instance Group resource. +// InstanceGroup: Represents an Instance Group resource. +// +// Instance Groups can be used to configure a target for load +// balancing. +// +// Instance groups can either be managed or unmanaged. // -// Use unmanaged instance groups if you need to apply load balancing to -// groups of heterogeneous instances or if you need to manage the -// instances yourself. For more information, read Instance groups. +// To create managed instance groups, use the instanceGroupManager or +// regionInstanceGroupManager resource instead. // -// For zonal unmanaged Instance Group, use instanceGroups resource. +// Use zonal unmanaged instance groups if you need to apply load +// balancing to groups of heterogeneous instances or if you need to +// manage the instances yourself. You cannot create regional unmanaged +// instance groups. // -// For regional unmanaged Instance Group, use regionInstanceGroups -// resource. (== resource_for beta.instanceGroups ==) (== resource_for -// v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) -// (== resource_for v1.regionInstanceGroups ==) +// For more information, read Instance groups. +// +// (== resource_for {$api_version}.instanceGroups ==) (== resource_for +// {$api_version}.regionInstanceGroups ==) type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -12865,10 +13558,8 @@ func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { // // For regional Managed Instance Group, use the // regionInstanceGroupManagers resource. (== resource_for -// beta.instanceGroupManagers ==) (== resource_for -// v1.instanceGroupManagers ==) (== resource_for -// beta.regionInstanceGroupManagers ==) (== resource_for -// v1.regionInstanceGroupManagers ==) +// {$api_version}.instanceGroupManagers ==) (== resource_for +// {$api_version}.regionInstanceGroupManagers ==) type InstanceGroupManager struct { // AutoHealingPolicies: The autohealing policy for this managed instance // group. You can specify only one value. @@ -12948,8 +13639,9 @@ type InstanceGroupManager struct { TargetPools []string `json:"targetPools,omitempty"` // TargetSize: The target number of running instances for this managed - // instance group. Deleting or abandoning instances reduces this number. - // Resizing the group changes this number. + // instance group. You can reduce this number by using the + // instanceGroupManager deleteInstances or abandonInstances methods. + // Resizing the group also changes this number. TargetSize int64 `json:"targetSize,omitempty"` // UpdatePolicy: The update policy for this managed instance group. 
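The ForceSendFields and NullFields members generated on each struct in these hunks control how empty values are serialized. A minimal sketch of that behavior, using the InstanceGroupManager type and its TargetSize field documented above; it assumes the vendored google.golang.org/api/compute/v1 package is importable as compute, the name "example-igm" is a placeholder, and the JSON in the comments is what the gensupport-based MarshalJSON is expected to emit.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// TargetSize is tagged omitempty, so a zero value is dropped from the body.
	igm := &compute.InstanceGroupManager{Name: "example-igm", TargetSize: 0}
	b, _ := json.Marshal(igm)
	fmt.Println(string(b)) // {"name":"example-igm"}

	// Listing the field in ForceSendFields makes the empty value explicit,
	// which is how a request that deliberately sends targetSize: 0 is expressed.
	igm.ForceSendFields = []string{"TargetSize"}
	b, _ = json.Marshal(igm)
	fmt.Println(string(b)) // {"name":"example-igm","targetSize":0}
}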
@@ -13429,6 +14121,10 @@ func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { } type InstanceGroupManagerStatus struct { + // Autoscaler: [Output Only] The URL of the Autoscaler that targets this + // instance group manager. + Autoscaler string `json:"autoscaler,omitempty"` + // IsStable: [Output Only] A bit indicating whether the managed instance // group is in a stable state. A stable state means that: none of the // instances in the managed instance group is currently undergoing any @@ -13437,7 +14133,12 @@ type InstanceGroupManagerStatus struct { // group; and the managed instance group itself is not being modified. IsStable bool `json:"isStable,omitempty"` - // ForceSendFields is a list of field names (e.g. "IsStable") to + // VersionTarget: [Output Only] A status of consistency of Instances' + // versions with their target version specified by version field on + // Instance Group Manager. + VersionTarget *InstanceGroupManagerStatusVersionTarget `json:"versionTarget,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Autoscaler") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13445,7 +14146,7 @@ type InstanceGroupManagerStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IsStable") to include in + // NullFields is a list of field names (e.g. "Autoscaler") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -13460,7 +14161,49 @@ func (s *InstanceGroupManagerStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceGroupManagerStatusVersionTarget struct { + // IsReached: [Output Only] A bit indicating whether version target has + // been reached in this managed instance group, i.e. all instances are + // in their target version. Instances' target version are specified by + // version field on Instance Group Manager. + IsReached bool `json:"isReached,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IsReached") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IsReached") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerStatusVersionTarget) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerStatusVersionTarget + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerUpdatePolicy struct { + // InstanceRedistributionType: The instance redistribution policy for + // regional managed instance groups. Valid values are: + // - PROACTIVE (default): The group attempts to maintain an even + // distribution of VM instances across zones in the region. + // - NONE: For non-autoscaled groups, proactive redistribution is + // disabled. + // + // Possible values: + // "NONE" + // "PROACTIVE" + InstanceRedistributionType string `json:"instanceRedistributionType,omitempty"` + // MaxSurge: The maximum number of instances that can be created above // the specified targetSize during the update process. By default, a // fixed value of 1 is used. This value can be either a fixed number or @@ -13500,10 +14243,20 @@ type InstanceGroupManagerUpdatePolicy struct { // disruptive action. // // Possible values: + // "NONE" + // "REFRESH" // "REPLACE" // "RESTART" MinimalAction string `json:"minimalAction,omitempty"` + // ReplacementMethod: What action should be used to replace instances. + // See minimal_action.REPLACE + // + // Possible values: + // "RECREATE" + // "SUBSTITUTE" + ReplacementMethod string `json:"replacementMethod,omitempty"` + // Type: The type of update process. You can specify either PROACTIVE so // that the instance group manager proactively executes actions in order // to bring instances to their target versions or OPPORTUNISTIC so that @@ -13516,20 +14269,22 @@ type InstanceGroupManagerUpdatePolicy struct { // "PROACTIVE" Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "MaxSurge") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "InstanceRedistributionType") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaxSurge") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. + // "InstanceRedistributionType") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. 
This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } @@ -13615,6 +14370,100 @@ func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, er return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagersApplyUpdatesRequest: +// InstanceGroupManagers.applyUpdatesToInstances +type InstanceGroupManagersApplyUpdatesRequest struct { + // Instances: The list of URLs of one or more instances for which you + // want to apply updates. Each URL can be a full URL or a partial URL, + // such as zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // MinimalAction: The minimal action that you want to perform on each + // instance during the update: + // - REPLACE: At minimum, delete the instance and create it again. + // - RESTART: Stop the instance and start it again. + // - REFRESH: Do not stop the instance. + // - NONE: Do not disrupt the instance at all. By default, the minimum + // action is NONE. If your update requires a more disruptive action than + // you set with this flag, the necessary action is performed to execute + // the update. + // + // Possible values: + // "NONE" + // "REFRESH" + // "REPLACE" + // "RESTART" + MinimalAction string `json:"minimalAction,omitempty"` + + // MostDisruptiveAllowedAction: The most disruptive action that you want + // to perform on each instance during the update: + // - REPLACE: Delete the instance and create it again. + // - RESTART: Stop the instance and start it again. + // - REFRESH: Do not stop the instance. + // - NONE: Do not disrupt the instance at all. By default, the most + // disruptive allowed action is REPLACE. If your update requires a more + // disruptive action than you set with this flag, the update request + // will fail. + // + // Possible values: + // "NONE" + // "REFRESH" + // "REPLACE" + // "RESTART" + MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersApplyUpdatesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceGroupManagersCreateInstancesRequest: +// InstanceGroupManagers.createInstances +type InstanceGroupManagersCreateInstancesRequest struct { + // Instances: [Required] List of specifications of per-instance configs. + Instances []*PerInstanceConfig `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersCreateInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagersDeleteInstancesRequest struct { // Instances: The URLs of one or more instances to delete. This can be a // full URL or a partial URL, such as @@ -13644,11 +14493,59 @@ func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, err return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceGroupManagersListErrorsResponse struct { + // Items: [Output Only] The list of errors of the managed instance + // group. + Items []*InstanceManagedByIgmError `json:"items,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersListErrorsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagersListManagedInstancesResponse struct { // ManagedInstances: [Output Only] The list of instances in the managed // instance group. 
ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14640,6 +15537,121 @@ func (s *InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceManagedByIgmError struct { + // Error: [Output Only] Contents of the error. + Error *InstanceManagedByIgmErrorManagedInstanceError `json:"error,omitempty"` + + // InstanceActionDetails: [Output Only] Details of the instance action + // that triggered this error. May be null, if the error was not caused + // by an action on an instance. This field is optional. + InstanceActionDetails *InstanceManagedByIgmErrorInstanceActionDetails `json:"instanceActionDetails,omitempty"` + + // Timestamp: [Output Only] The time that this error occurred. This + // value is in RFC3339 text format. + Timestamp string `json:"timestamp,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceManagedByIgmError) MarshalJSON() ([]byte, error) { + type NoMethod InstanceManagedByIgmError + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceManagedByIgmErrorInstanceActionDetails struct { + // Action: [Output Only] Action that managed instance group was + // executing on the instance when the error occurred. Possible values: + // + // Possible values: + // "ABANDONING" + // "CREATING" + // "CREATING_WITHOUT_RETRIES" + // "DELETING" + // "NONE" + // "RECREATING" + // "REFRESHING" + // "RESTARTING" + // "VERIFYING" + Action string `json:"action,omitempty"` + + // Instance: [Output Only] The URL of the instance. The URL can be set + // even if the instance has not yet been created. + Instance string `json:"instance,omitempty"` + + // Version: [Output Only] Version this instance was created from, or was + // being created from, but the creation failed. Corresponds to one of + // the versions that were set on the Instance Group Manager resource at + // the time this instance was being created. + Version *ManagedInstanceVersion `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceManagedByIgmErrorInstanceActionDetails) MarshalJSON() ([]byte, error) { + type NoMethod InstanceManagedByIgmErrorInstanceActionDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceManagedByIgmErrorManagedInstanceError struct { + // Code: [Output Only] Error code. + Code string `json:"code,omitempty"` + + // Message: [Output Only] Error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceManagedByIgmErrorManagedInstanceError) MarshalJSON() ([]byte, error) { + type NoMethod InstanceManagedByIgmErrorManagedInstanceError + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceMoveRequest struct { // DestinationZone: The URL of the destination zone to move the // instance. This can be a full or partial URL. For example, the @@ -14735,6 +15747,10 @@ type InstanceProperties struct { // can consume from. ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // ResourcePolicies: Resource policies (names, not ULRs) applied to + // instances created from this template. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // Scheduling: Specifies the scheduling options for the instances that // are created from this template. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -14807,8 +15823,7 @@ func (s *InstanceReference) MarshalJSON() ([]byte, error) { // // You can use instance templates to create VM instances and managed // instance groups. For more information, read Instance Templates. 
(== -// resource_for beta.instanceTemplates ==) (== resource_for -// v1.instanceTemplates ==) +// resource_for {$api_version}.instanceTemplates ==) type InstanceTemplate struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance template in RFC3339 text format. @@ -15049,6 +16064,7 @@ type InstanceWithNamedPorts struct { // Status: [Output Only] The status of the instance. // // Possible values: + // "DEPROVISIONING" // "PROVISIONING" // "REPAIRING" // "RUNNING" @@ -15083,6 +16099,62 @@ func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesAddResourcePoliciesRequest struct { + // ResourcePolicies: Resource policies to be added to this instance. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourcePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesAddResourcePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstancesRemoveResourcePoliciesRequest struct { + // ResourcePolicies: Resource policies to be removed from this instance. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourcePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesRemoveResourcePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesScopedList struct { // Instances: [Output Only] A list of instances contained in this scope. 
Instances []*Instance `json:"instances,omitempty"` @@ -15438,8 +16510,8 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { // // An Interconnect resource is a dedicated connection between the GCP // network and your on-premises network. For more information, read the -// Dedicated Interconnect Overview. (== resource_for v1.interconnects -// ==) (== resource_for beta.interconnects ==) +// Dedicated Interconnect Overview. (== resource_for +// {$api_version}.interconnects ==) type Interconnect struct { // AdminEnabled: Administrative status of the interconnect. When this is // set to true, the Interconnect is functional and can carry traffic. @@ -15616,8 +16688,7 @@ func (s *Interconnect) MarshalJSON() ([]byte, error) { // You can use Interconnect attachments (VLANS) to connect your Virtual // Private Cloud networks to your on-premises networks through an // Interconnect. For more information, read Creating VLAN Attachments. -// (== resource_for beta.interconnectAttachments ==) (== resource_for -// v1.interconnectAttachments ==) +// (== resource_for {$api_version}.interconnectAttachments ==) type InterconnectAttachment struct { // AdminEnabled: Determines whether this Attachment will carry packets. // Not present for PARTNER_PROVIDER. @@ -15663,7 +16734,7 @@ type InterconnectAttachment struct { // within link-local address space (169.254.0.0/16) and must be /29 or // shorter (/28, /27, etc). Google will attempt to select an unused /29 // from the supplied candidate prefix(es). The request will fail if all - // possible /29s are in use on Google?s edge. If not supplied, Google + // possible /29s are in use on Google's edge. If not supplied, Google // will randomly select an unused /29 from all of link-local space. CandidateSubnets []string `json:"candidateSubnets,omitempty"` @@ -16175,7 +17246,7 @@ func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { // corresponding PARTNER attachments. type InterconnectAttachmentPartnerMetadata struct { // InterconnectName: Plain text name of the Interconnect this attachment - // is connected to, as displayed in the Partner?s portal. For instance + // is connected to, as displayed in the Partner's portal. For instance // "Chicago 1". This value may be validated to match approved Partner // values. InterconnectName string `json:"interconnectName,omitempty"` @@ -16185,7 +17256,7 @@ type InterconnectAttachmentPartnerMetadata struct { // values. PartnerName string `json:"partnerName,omitempty"` - // PortalUrl: URL of the Partner?s portal for this Attachment. Partners + // PortalUrl: URL of the Partner's portal for this Attachment. Partners // may customise this to be a deep link to the specific resource on the // Partner portal. This value may be validated to match approved Partner // values. @@ -16422,7 +17493,7 @@ func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { } // InterconnectDiagnostics: Diagnostics information about interconnect, -// contains detailed and current technical information about Google?s +// contains detailed and current technical information about Google's // side of the connection. type InterconnectDiagnostics struct { // ArpCaches: A list of InterconnectDiagnostics.ARPEntry objects, @@ -16494,11 +17565,11 @@ func (s *InterconnectDiagnosticsARPEntry) MarshalJSON() ([]byte, error) { } type InterconnectDiagnosticsLinkLACPStatus struct { - // GoogleSystemId: System ID of the port on Google?s side of the LACP + // GoogleSystemId: System ID of the port on Google's side of the LACP // exchange. 
GoogleSystemId string `json:"googleSystemId,omitempty"` - // NeighborSystemId: System ID of the port on the neighbor?s side of the + // NeighborSystemId: System ID of the port on the neighbor's side of the // LACP exchange. NeighborSystemId string `json:"neighborSystemId,omitempty"` @@ -17247,7 +18318,12 @@ func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// License: A license resource. +// License: Represents a License resource. +// +// A License represents billing and aggregate usage data for public and +// marketplace images. Caution This resource is intended for use only +// by third-party partners who are creating Cloud Marketplace images. +// (== resource_for {$api_version}.licenses ==) type License struct { // ChargesUseFee: [Output Only] Deprecated. This field no longer // reflects whether a license charges a usage fee. @@ -17314,6 +18390,12 @@ func (s *License) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LicenseCode: Represents a License Code resource. +// +// A License Code is a unique identifier used to represent a license +// resource. Caution This resource is intended for use only by +// third-party partners who are creating Cloud Marketplace images. (== +// resource_for {$api_version}.licenseCodes ==) type LicenseCode struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -17695,9 +18777,6 @@ func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { // "iam_principal" } ==> increment counter // /iam/policy/debug_access_count {iam_principal=[value of // IAMContext.principal]} -// -// TODO(b/141846426): Consider supporting "authority" and -// "iam_principal" fields in the same counter. type LogConfigCounterOptions struct { // CustomFields: Custom fields. CustomFields []*LogConfigCounterOptionsCustomField `json:"customFields,omitempty"` @@ -17769,8 +18848,10 @@ func (s *LogConfigCounterOptionsCustomField) MarshalJSON() ([]byte, error) { // LogConfigDataAccessOptions: Write a Data Access (Gin) log type LogConfigDataAccessOptions struct { // LogMode: Whether Gin logging should happen in a fail-closed manner at - // the caller. This is relevant only in the LocalIAM implementation, for - // now. + // the caller. This is currently supported in the LocalIAM + // implementation, Stubby C++, and Stubby Java. For Apps Framework, see + // go/af-audit-logging#failclosed. TODO(b/77591626): Add support for + // Stubby Go. TODO(b/129671387): Add support for Scaffolding. // // Possible values: // "LOG_FAIL_CLOSED" @@ -17804,8 +18885,7 @@ func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { // // You can use specific machine types for your VM instances based on // performance and pricing requirements. For more information, read -// Machine Types. (== resource_for v1.machineTypes ==) (== resource_for -// beta.machineTypes ==) +// Machine Types. (== resource_for {$api_version}.machineTypes ==) type MachineType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -18414,10 +19494,15 @@ type ManagedInstance struct { // even if the instance has not yet been created. Instance string `json:"instance,omitempty"` + // InstanceHealth: [Output Only] Health state of the instance per + // health-check. 
+ InstanceHealth []*ManagedInstanceInstanceHealth `json:"instanceHealth,omitempty"` + // InstanceStatus: [Output Only] The status of the instance. This field // is empty when the instance does not exist. // // Possible values: + // "DEPROVISIONING" // "PROVISIONING" // "REPAIRING" // "RUNNING" @@ -18459,6 +19544,46 @@ func (s *ManagedInstance) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedInstanceInstanceHealth struct { + // DetailedHealthState: [Output Only] The current detailed instance + // health state. + // + // Possible values: + // "DRAINING" + // "HEALTHY" + // "TIMEOUT" + // "UNHEALTHY" + // "UNKNOWN" + DetailedHealthState string `json:"detailedHealthState,omitempty"` + + // HealthCheck: [Output Only] The URL for the health check that verifies + // whether the instance is healthy. + HealthCheck string `json:"healthCheck,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DetailedHealthState") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DetailedHealthState") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ManagedInstanceInstanceHealth) MarshalJSON() ([]byte, error) { + type NoMethod ManagedInstanceInstanceHealth + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedInstanceLastAttempt struct { // Errors: [Output Only] Encountered errors during the last attempt to // create or delete the instance. @@ -18670,13 +19795,13 @@ func (s *MetadataItems) MarshalJSON() ([]byte, error) { // restrict routing configuration to a limited set of loadbalancing // proxies. Proxies and sidecars involved in loadbalancing would // typically present metadata to the loadbalancers which need to match -// criteria specified here. If a match takes place, the relevant routing +// criteria specified here. If a match takes place, the relevant // configuration is made available to those proxies. // For each metadataFilter in this list, if its filterMatchCriteria is // set to MATCH_ANY, at least one of the filterLabels must match the // corresponding label provided in the metadata. If its // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels -// must match with corresponding labels in the provided metadata. +// must match with corresponding labels provided in the metadata. // An example for using metadataFilters would be: if loadbalancing // involves Envoys, they will only receive routing configuration when // values in metadataFilters match values supplied in `, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. 
For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *AcceleratorTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.aggregatedList" call. +// Exactly one of *AcceleratorTypeAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of accelerator types.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.acceleratorTypes.get": + +type AcceleratorTypesGetCall struct { + s *Service + project string + zone string + acceleratorType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified accelerator type. +func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { + c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.acceleratorType = acceleratorType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
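// The Pages helper above wraps that token loop; a short sketch of driving it under a
// deadline (the context passed to Pages supersedes any earlier Context call). Assumes
// the svc / "my-project" placeholders and compute import from the earlier sketch,
// plus "context", "fmt" and "time"; the Items map layout is taken from the generated
// aggregated-list type outside this hunk.
func printAcceleratorTypeCounts(svc *compute.Service) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel() // aborts any in-flight page request once we return

	return svc.AcceleratorTypes.AggregatedList("my-project").MaxResults(250).
		Pages(ctx, func(page *compute.AcceleratorTypeAggregatedList) error {
			for scope, scoped := range page.Items {
				fmt.Printf("%s: %d accelerator types\n", scope, len(scoped.AcceleratorTypes))
			}
			return nil // a non-nil error here would stop the iteration
		})
}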
+func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "acceleratorType": c.acceleratorType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified accelerator type.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "acceleratorType" + // ], + // "parameters": { + // "acceleratorType": { + // "description": "Name of the accelerator type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.acceleratorTypes.list": + +type AcceleratorTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of accelerator types available to the +// specified project. +func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { + c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
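// Sketch of the Get call defined above. "my-project", "us-central1-a" and
// "nvidia-tesla-t4" are placeholders, and svc is a *compute.Service built as in the
// earlier sketch (imports: "fmt" plus the compute package).
func describeAcceleratorType(svc *compute.Service) error {
	at, err := svc.AcceleratorTypes.Get("my-project", "us-central1-a", "nvidia-tesla-t4").Do()
	if err != nil {
		return err
	}
	// Name and Description are fields of the generated AcceleratorType struct,
	// declared elsewhere in this file.
	fmt.Printf("%s: %s\n", at.Name, at.Description)
	return nil
}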
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
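// Sketch of a conditional request with IfNoneMatch as described above: reuse the ETag
// the server sent with an earlier response (available through the embedded
// googleapi.ServerResponse) and treat a 304 as "nothing changed". Assumes the usual
// placeholders plus the "google.golang.org/api/googleapi" import; whether an ETag is
// present depends on the server.
func listIfChanged(svc *compute.Service, lastETag string) (*compute.AcceleratorTypeList, error) {
	list, err := svc.AcceleratorTypes.List("my-project", "us-central1-a").
		IfNoneMatch(lastETag). // an empty tag on the first call simply sends no header
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // HTTP 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	fmt.Println("new ETag:", list.Header.Get("Etag")) // reuse on the next call, if set
	return list, nil
}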
+func (c *AcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.list" call. +// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of accelerator types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of addresses. 
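// Sketch of walking every address in a project through the aggregated list documented
// above. The Items map and the Addresses field of each per-scope entry are assumed
// from the generated types outside this hunk; svc, "my-project" and the imports are
// as in the earlier sketches.
func printAllAddresses(ctx context.Context, svc *compute.Service) error {
	return svc.Addresses.AggregatedList("my-project").
		IncludeAllScopes(false). // only scopes where addresses can actually exist
		Pages(ctx, func(page *compute.AddressAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, addr := range scoped.Addresses {
					fmt.Printf("%s\t%s\t%s\n", scope, addr.Name, addr.Address)
				}
			}
			return nil
		})
}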
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *AddressesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AddressesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
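// Sketch of an idempotent delete using the requestId parameter described above. The
// UUID is built with the standard library only; any RFC 4122 UUID other than the
// all-zero value is acceptable. svc, project, region and the address name are
// placeholders; imports assumed: "crypto/rand" and "fmt" plus the compute package.
func deleteAddressIdempotently(svc *compute.Service) (*compute.Operation, error) {
	// Generate a random version-4 UUID so a retry of this exact request is
	// recognised and deduplicated by the server.
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return nil, err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	requestID := fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])

	op, err := svc.Addresses.Delete("my-project", "us-central1", "example-address").
		RequestId(requestID).
		Do()
	// On a timeout or transient failure the caller can re-issue the call with the
	// same requestID; the server treats it as the original request.
	return op, err
}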
+func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
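// Sketch of a partial response via Fields, as documented above: only the JSON members
// named in the selector are returned, which keeps responses small. The selector names
// ("address", "status") and the Address fields read below are assumed from the
// generated types outside this hunk; placeholders and imports as before.
func getAddressIPOnly(svc *compute.Service) (string, error) {
	addr, err := svc.Addresses.Get("my-project", "us-central1", "example-address").
		Fields("address", "status"). // googleapi.Field values; plain strings convert
		Do()
	if err != nil {
		return "", err
	}
	return addr.Address, nil // members outside the selector stay at their zero value
}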
+func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an address resource in the specified project by using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
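// Sketch of creating an address and waiting for the returned Operation to finish by
// polling RegionOperations.Get, which belongs to the same generated service but is
// not shown in this hunk. The Operation fields (Name, Status, Error) are assumed from
// the generated types; placeholders as before, imports: "context", "fmt", "time".
// Real code would add a deadline and exponential back-off instead of a fixed sleep.
func reserveAddress(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Addresses.Insert("my-project", "us-central1", &compute.Address{
		Name: "example-address",
	}).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second) // crude poll interval
		op, err = svc.RegionOperations.Get("my-project", "us-central1", op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("insert failed: %s", op.Error.Errors[0].Message)
	}
	return nil
}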
+func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project by using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
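+//
+// A minimal usage sketch (svc, ctx and the literal project and region names are
+// assumptions for illustration): iterate every address in a region page by page,
+// letting Pages follow nextPageToken automatically:
+//
+//	err := svc.Addresses.List("my-project", "us-central1").Pages(ctx, func(page *AddressList) error {
+//		for _, addr := range page.Items {
+//			fmt.Println(addr.Name, addr.Address)
+//		}
+//		return nil
+//	})
+//	if err != nil {
+//		// handle the error from Do or from the page callback
+//	}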
+func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *AutoscalersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AutoscalersAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler resource. Gets a list of +// available autoscalers by making a list() request. +func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
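+//
+// A brief sketch (svc, ctx and the literal project, zone and autoscaler names are
+// assumptions for illustration): request a partial response containing only the
+// autoscaler's name and status:
+//
+//	as, err := svc.Autoscalers.Get("my-project", "us-central1-a", "web-autoscaler").
+//		Fields("name", "status").
+//		Context(ctx).
+//		Do()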
+func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. 
Gets a list of available autoscalers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
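+//
+// For instance (a sketch; svc, ctx, autoscaler and the header shown are
+// illustrative assumptions, not something this API requires):
+//
+//	call := svc.Autoscalers.Insert("my-project", "us-central1-a", autoscaler)
+//	call.Header().Set("X-Goog-Request-Reason", "batch-resize")
+//	op, err := call.Context(ctx).Do()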
+func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of autoscalers contained within the specified +// zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.list" call. +// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AutoscalerList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
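+//
+// A retry sketch under stated assumptions (svc, ctx, autoscaler and the
+// github.com/google/uuid package are illustrative, not part of this file):
+// generate the ID once and reuse it for every attempt of the same update so the
+// server deduplicates retries:
+//
+//	reqID := uuid.New().String()
+//	op, err := svc.Autoscalers.Update("my-project", "us-central1-a", autoscaler).
+//		RequestId(reqID).
+//		Context(ctx).
+//		Do()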
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.addSignedUrlKey": + +type BackendBucketsAddSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend bucket. +func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { + c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
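As a concrete illustration of AddSignedUrlKey (again illustrative, not part of the patch, and sharing the imports of the earlier sketch): the call posts a SignedUrlKey body and names the backend bucket in the URL path. The KeyName/KeyValue field names are taken from the public Compute API surface rather than from this diff, and both values below are placeholders.

    // addSignedURLKey registers a signed-URL key on a backend bucket.
    func addSignedURLKey(ctx context.Context, svc *compute.Service, project, bucket string) (*compute.Operation, error) {
        key := &compute.SignedUrlKey{
            KeyName:  "cdn-key-1",                          // must conform to RFC 1035
            KeyValue: "REPLACE_WITH_BASE64URL_128BIT_KEY",  // placeholder; real keys are base64url-encoded 128-bit values
        }
        return svc.BackendBuckets.AddSignedUrlKey(project, bucket, key).Context(ctx).Do()
    }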
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.delete": + +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendBucket resource. +func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendBucket resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendBuckets.delete", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.deleteSignedUrlKey": + +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend bucket. +func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket", + // "keyName" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.get": + +type BackendBucketsGetCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendBucket resource. Gets a list of +// available backend buckets by making a list() request. +func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { + c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
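The Fields setter above maps to the standard fields partial-response parameter, so the server returns only what the caller asks for. A minimal sketch under the same assumptions as the earlier ones (the helper name and field selection are invented for illustration):

    // getBucketBackingName fetches only the two fields it actually reads.
    func getBucketBackingName(ctx context.Context, svc *compute.Service, project, bucket string) (string, error) {
        bb, err := svc.BackendBuckets.Get(project, bucket).
            Fields("name", "bucketName"). // partial response: trims the returned payload
            Context(ctx).
            Do()
        if err != nil {
            return "", err
        }
        return bb.BucketName, nil
    }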
+func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.get" call. +// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendBucket resource. 
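IfNoneMatch plus googleapi.IsNotModified gives conditional GETs keyed on the resource ETag; note that "In-None-Match" in the doc comment is a generator typo for the If-None-Match header that doRequest actually sets. A sketch under the same assumptions, with the cached-copy handling invented for illustration:

    // Also needs "google.golang.org/api/googleapi".
    // refreshBucket revalidates a cached BackendBucket against its ETag.
    func refreshBucket(ctx context.Context, svc *compute.Service, project, bucket, cachedETag string, cached *compute.BackendBucket) (*compute.BackendBucket, error) {
        bb, err := svc.BackendBuckets.Get(project, bucket).
            IfNoneMatch(cachedETag). // sent as the If-None-Match request header
            Context(ctx).
            Do()
        if googleapi.IsNotModified(err) {
            return cached, nil // HTTP 304: the cached copy is still current
        }
        if err != nil {
            return nil, err
        }
        return bb, nil
    }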
Gets a list of available backend buckets by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.get", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "BackendBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendBuckets.insert": + +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. +func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
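The requestId parameter documented above gives mutating calls at-most-once behavior across retries: reuse the same UUID when retrying and the server ignores the duplicate. A sketch with the same assumptions; github.com/google/uuid is used only for illustration and is not a dependency this patch introduces, and the bucket values are placeholders.

    // Also needs "github.com/google/uuid".
    // createBackendBucket creates a bucket-backed backend idempotently.
    func createBackendBucket(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
        reqID := uuid.New().String() // reuse this exact value if the call is retried
        bb := &compute.BackendBucket{
            Name:       "static-assets",
            BucketName: "my-gcs-bucket", // the backing Cloud Storage bucket
        }
        return svc.BackendBuckets.Insert(project, bb).
            RequestId(reqID).
            Context(ctx).
            Do()
    }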
+func (c *BackendBucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
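Every Do in this file follows the contract quoted above: exactly one of the typed response or the error is non-nil, and non-2xx statuses surface as *googleapi.Error values carrying the HTTP code and headers. A sketch of leaning on that (deleteIfExists and its 404 policy are illustrative, not something the package prescribes):

    // Also needs "errors" and "google.golang.org/api/googleapi".
    // deleteIfExists treats "already gone" as success.
    func deleteIfExists(ctx context.Context, svc *compute.Service, project, bucket string) error {
        _, err := svc.BackendBuckets.Delete(project, bucket).Context(ctx).Do()
        var gerr *googleapi.Error
        if errors.As(err, &gerr) && gerr.Code == 404 {
            return nil // the backend bucket does not exist; nothing to delete
        }
        return err
    }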
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.list": + +type BackendBucketsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendBucketList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucketList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets", + // "response": { + // "$ref": "BackendBucketList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendBuckets.patch": + +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified BackendBucket resource with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
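The filter, orderBy and maxResults setters documented above combine with the Pages helper, which drives pageToken/nextPageToken until the listing is exhausted. A sketch under the same assumptions (the filter expression, ordering and page size are illustrative):

    // Also needs "fmt".
    // printBackendBuckets walks every page of the listing.
    func printBackendBuckets(ctx context.Context, svc *compute.Service, project string) error {
        call := svc.BackendBuckets.List(project).
            Filter(`name != example-bucket`).  // see the filter grammar documented above
            OrderBy("creationTimestamp desc"). // newest first
            MaxResults(100)
        return call.Pages(ctx, func(page *compute.BackendBucketList) error {
            for _, bb := range page.Items {
                fmt.Println(bb.Name, "->", bb.BucketName)
            }
            return nil // returning an error here would stop the iteration
        })
    }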
+func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendBuckets.patch", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } -type AcceleratorTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header } -// AggregatedList: Retrieves an aggregated list of accelerator types. -// (== suppress_warning http-rest-shadowed ==) -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} +// method id "compute.backendBuckets.update": -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c +type BackendBucketsUpdateCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Update: Updates the specified BackendBucket resource with the data +// included in the request. +func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { +func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { +func (c *BackendBucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { +// Do executes the "compute.backendBuckets.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38338,7 +45195,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38350,34 +45207,19 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", // "parameterOrder": [ - // "project" + // "project", + // "backendBucket" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -38386,136 +45228,131 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/acceleratorTypes", + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, // "response": { - // "$ref": "AcceleratorTypeAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.acceleratorTypes.get": +// method id "compute.backendServices.addSignedUrlKey": -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified accelerator type. (== suppress_warning -// http-rest-shadowed ==) -func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend service. 
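As a rough illustration of how the regenerated BackendBuckets.Update surface above is driven from caller code — a hedged sketch, not part of this patch: the project and resource names and the BackendBucket fields are placeholders, and the client is assumed to come from compute.NewService using Application Default Credentials.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials by default; pass
	// explicit option.ClientOption values if the environment needs them.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Update is a full PUT of the resource, so send the complete desired state.
	// Reusing the same requestId on a retry lets the server drop duplicates,
	// exactly as the RequestId doc comment above describes.
	bucket := &compute.BackendBucket{
		Name:       "my-backend-bucket", // placeholder resource name
		BucketName: "my-gcs-bucket",     // placeholder Cloud Storage bucket
	}
	op, err := svc.BackendBuckets.Update("my-project", "my-backend-bucket", bucket).
		RequestId("7d8f3a9c-21f4-4cde-9f00-3c1d2b4a5e6f"). // any non-zero UUID; keep it stable across retries
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("backendBuckets.update: %v", err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}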
+func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.acceleratorType = acceleratorType + c.backendService = backendService + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AcceleratorTypesGetCall) Header() http.Header { +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38534,7 +45371,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38546,19 +45383,17 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", + // "description": "Adds a key for validating requests with signed URLs for this backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.addSignedUrlKey", // "parameterOrder": [ // "project", - // "zone", - // "acceleratorType" + // "backendService" // ], // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -38569,45 +45404,43 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, // "response": { - // "$ref": "AcceleratorType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.acceleratorTypes.list": +// method id "compute.backendServices.aggregatedList": -type AcceleratorTypesListCall struct { +type BackendServicesAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of accelerator types available to the -// specified project. (== suppress_warning http-rest-shadowed ==) -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. 
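Continuing the sketch above, a hypothetical helper around the AddSignedUrlKey call just introduced: the key name is a placeholder, and keyValue is assumed to already be the base64url-encoded 128-bit key value that Cloud CDN signed URLs use (generating one is out of scope here). Same svc client and imports as the previous snippet.

// addCDNKey attaches a signed-URL key to a global backend service and
// returns the resulting long-running Operation.
func addCDNKey(ctx context.Context, svc *compute.Service, project, backendService, keyValue string) (*compute.Operation, error) {
	key := &compute.SignedUrlKey{
		KeyName:  "cdn-key-1", // placeholder key name
		KeyValue: keyValue,    // base64url-encoded key material supplied by the caller
	}
	return svc.BackendServices.AddSignedUrlKey(project, backendService, key).
		Context(ctx).
		Do()
}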
+func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -38615,36 +45448,50 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *BackendServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *BackendServicesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -38654,22 +45501,23 @@ func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorType // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -38677,7 +45525,7 @@ func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypes // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38687,7 +45535,7 @@ func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorType // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
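The filter, ordering, and paging parameters documented above all compose on the call builder. A hedged single-page example under the same assumptions (placeholder project and filter expression; the response's Items map keyed by scope follows the aggregated-list schema defined elsewhere in this file):

// listOnePage fetches a single page of backend services across all scopes,
// newest first, excluding one known name.
func listOnePage(ctx context.Context, svc *compute.Service, project string) error {
	resp, err := svc.BackendServices.AggregatedList(project).
		Filter(`name != example-backend`). // placeholder filter expression
		OrderBy("creationTimestamp desc"). // only name / "creationTimestamp desc" are supported
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range resp.Items {
		log.Printf("%s: %d backend services", scope, len(scoped.BackendServices))
	}
	if resp.NextPageToken != "" {
		log.Printf("more results available; pass %q to PageToken for the next page", resp.NextPageToken)
	}
	return nil
}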
-func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { +func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -38695,23 +45543,23 @@ func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTyp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesListCall) Header() http.Header { +func (c *BackendServicesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -38722,7 +45570,7 @@ func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -38731,19 +45579,18 @@ func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38762,7 +45609,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeList{ + ret := &BackendServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38774,55 +45621,52 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", + // "id": "compute.backendServices.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes", + // "path": "{project}/aggregated/backendServices", // "response": { - // "$ref": "AcceleratorTypeList" + // "$ref": "BackendServiceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38836,7 +45680,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { +func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -38847,165 +45691,109 @@ func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*Accelerato if err := f(x); err != nil { return err } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.aggregatedList": - -type AddressesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of addresses. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +// method id "compute.backendServices.delete": + +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
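Rather than threading nextPageToken by hand, the regenerated Pages helper above walks every page and stops when f returns an error or the token runs out. A sketch under the same assumptions (placeholder project, svc from the first snippet):

// countAllBackendServices visits every page of the aggregated list and
// tallies the results across all scopes (zonal, regional, and global).
func countAllBackendServices(ctx context.Context, svc *compute.Service, project string) (int, error) {
	total := 0
	err := svc.BackendServices.AggregatedList(project).
		Pages(ctx, func(page *compute.BackendServiceAggregatedList) error {
			for _, scoped := range page.Items {
				total += len(scoped.BackendServices)
			}
			return nil // returning a non-nil error here halts the iteration
		})
	return total, err
}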
+func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { +func (c *BackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { +// Do executes the "compute.backendServices.delete" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39024,7 +45812,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39036,34 +45824,19 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", // "parameterOrder": [ - // "project" + // "project", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -39072,62 +45845,43 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/addresses", + // "path": "{project}/global/backendServices/{backendService}", // "response": { - // "$ref": "AddressAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.delete": +// method id "compute.backendServices.deleteSignedUrlKey": -type AddressesDeleteCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. 
(== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend service. +func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) return c } @@ -39145,7 +45899,7 @@ func (r *AddressesService) Delete(project string, region string, address string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { +func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39153,7 +45907,7 @@ func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39161,23 +45915,23 @@ func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesDeleteCall) Header() http.Header { +func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39185,29 +45939,28 @@ func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.delete" call. +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39238,33 +45991,31 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", + // "description": "Deletes a key for validating requests with signed URLs for this backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.deleteSignedUrlKey", // "parameterOrder": [ // "project", - // "region", - // "address" + // "backendService", + // "keyName" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -39274,7 +46025,7 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", // "response": { // "$ref": "Operation" // }, @@ -39286,34 +46037,32 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.get": +// method id "compute.backendServices.get": -type AddressesGetCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified address resource. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified BackendService resource. Gets a list of +// available backend services. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { +func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39323,7 +46072,7 @@ func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
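A minimal usage sketch for the regenerated compute.backendServices.deleteSignedUrlKey wrapper above. Assumptions: the vendored package is imported as compute (the exact import path depends on the API version pinned in go.mod), svc.BackendServices is the usual generated accessor, NewService is the generated constructor, and the project, backend-service and key names are placeholders.

package backendservicesketch

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path; match the version vendored here
)

// newComputeService builds the generated client; NewService picks up
// application-default credentials unless options are supplied.
func newComputeService(ctx context.Context) (*compute.Service, error) {
	return compute.NewService(ctx)
}

// deleteSignedURLKey removes one signed-URL key from a global backend service.
func deleteSignedURLKey(ctx context.Context, svc *compute.Service) error {
	op, err := svc.BackendServices.
		DeleteSignedUrlKey("my-project", "my-backend-service", "my-key"). // placeholder names
		RequestId("f3b1c9a2-0c4e-4d6a-9b7e-1a2b3c4d5e6f").                // optional idempotency token (any non-zero UUID)
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("deleteSignedUrlKey operation: %v", op.Name) // Operation.Name assumed from the Compute API surface
	return nil
}

The RequestId setter is optional; it only matters when a caller needs the retry-safe idempotency behaviour described in the comment above.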
-func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -39331,23 +46080,23 @@ func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { +func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { +func (c *BackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39358,7 +46107,7 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -39366,21 +46115,20 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39399,7 +46147,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39411,17 +46159,16 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", // "httpMethod": "GET", - // "id": "compute.addresses.get", + // "id": "compute.backendServices.get", // "parameterOrder": [ // "project", - // "region", - // "address" + // "backendService" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", + // "backendService": { + // "description": "Name of the BackendService resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -39433,18 +46180,164 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.getHealth": + +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetHealth: Gets the most recent health check results for this +// BackendService. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendServicesGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceGroupHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "Address" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39455,27 +46348,26 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } -// method id "compute.addresses.insert": +// method id "compute.backendServices.insert": -type AddressesInsertCall struct { - s *Service - project string - region string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project by using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Understanding backend services for more information. 
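Get and GetHealth above can be driven the same way. A sketch under the same assumptions; ResourceGroupReference.Group, BackendService.Name and BackendServiceGroupHealth.HealthStatus are assumed field names from the Compute API surface, and the instance-group URL is a placeholder.

package backendservicesketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// describeBackend fetches a backend service and the latest health results
// for one of its instance groups.
func describeBackend(ctx context.Context, svc *compute.Service, project, name string) error {
	bs, err := svc.BackendServices.Get(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("backend service:", bs.Name) // Name field assumed

	ref := &compute.ResourceGroupReference{
		// Group is assumed to take the instance group's self-link; placeholder URL.
		Group: "https://www.googleapis.com/compute/v1/projects/" + project + "/zones/us-central1-a/instanceGroups/example-ig",
	}
	health, err := svc.BackendServices.GetHealth(project, name, ref).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("health entries: %d\n", len(health.HealthStatus)) // HealthStatus field assumed
	return nil
}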
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendservice = backendservice return c } @@ -39493,7 +46385,7 @@ func (r *AddressesService) Insert(project string, region string, address *Addres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39501,7 +46393,7 @@ func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39509,36 +46401,36 @@ func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesInsertCall) Header() http.Header { +func (c *BackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -39547,19 +46439,18 @@ func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.insert" call. 
+// Do executes the "compute.backendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39590,12 +46481,11 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Understanding backend services for more information.", // "httpMethod": "POST", - // "id": "compute.addresses.insert", + // "id": "compute.backendServices.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -39605,22 +46495,15 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/backendServices", // "request": { - // "$ref": "Address" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -39633,25 +46516,23 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.list": +// method id "compute.backendServices.list": -type AddressesListCall struct { +type BackendServicesListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of addresses contained within the specified -// region. 
(== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of BackendService resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -39659,36 +46540,37 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -39698,22 +46580,23 @@ func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -39721,7 +46604,7 @@ func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39731,7 +46614,7 @@ func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -39739,23 +46622,23 @@ func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AddressesListCall) Header() http.Header { +func (c *BackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39766,7 +46649,7 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -39775,19 +46658,18 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39806,7 +46688,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39818,34 +46700,33 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } return ret, nil // { - // "description": "Retrieves a list of addresses contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of BackendService resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.addresses.list", + // "id": "compute.backendServices.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -39855,18 +46736,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/backendServices", // "response": { - // "$ref": "AddressList" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39880,7 +46754,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { +func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -39898,157 +46772,290 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } -// method id "compute.autoscalers.aggregatedList": +// method id "compute.backendServices.patch": -type AutoscalersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. (== -// suppress_warning http-rest-shadowed ==) -func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified BackendService resource with the data +// included in the request. 
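List wires up the filter, maxResults, orderBy and pageToken parameters documented above, and Pages drives the nextPageToken loop so callers never touch the token directly. A sketch, assuming the usual Items and Name fields on the generated types:

package backendservicesketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// listBackends walks every BackendService in the project, one page at a time.
func listBackends(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.BackendServices.List(project).
		Filter("name != example-backend"). // filter grammar as documented above
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	// Pages keeps requesting with the returned nextPageToken until it is empty.
	return call.Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bs := range page.Items { // Items/Name fields assumed
			fmt.Println(bs.Name)
		}
		return nil
	})
}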
There are several Understanding backend +// services to keep in mind when updating a backend service. Read +// Understanding backend services for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.backendService = backendService + c.backendservice = backendservice return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.setSecurityPolicy": + +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSecurityPolicy: Sets the security policy for the specified backend +// service. 
+func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40067,7 +47074,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40079,34 +47086,18 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", + // "description": "Sets the security policy for the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ - // "project" + // "project", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -40115,61 +47106,50 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/autoscalers", + // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.delete": +// method id "compute.backendServices.update": -type AutoscalersDeleteCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. (== suppress_warning -// http-rest-shadowed ==) -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several Understanding backend +// services to keep in mind when updating a backend service. Read +// Understanding backend services for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendService = backendService + c.backendservice = backendservice return c } @@ -40187,7 +47167,7 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -40195,7 +47175,7 @@ func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40203,53 +47183,57 @@ func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *BackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. +// Do executes the "compute.backendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40280,17 +47264,16 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -40307,16 +47290,12 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -40328,34 +47307,107 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.get": +// method id "compute.diskTypes.aggregatedList": -type AutoscalersGetCall struct { +type DiskTypesAggregatedListCall struct { s *Service project string - zone string - autoscaler string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of disk types. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *DiskTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *DiskTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40365,7 +47417,7 @@ func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -40373,23 +47425,23 @@ func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { +func (c *DiskTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40400,7 +47452,7 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -40408,21 +47460,19 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40441,7 +47491,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40453,20 +47503,39 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of disk types.", // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "id": "compute.diskTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" + // "project" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -40475,18 +47544,11 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/aggregated/diskTypes", // "response": { - // "$ref": "Autoscaler" + // "$ref": "DiskTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40497,109 +47559,122 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } -// method id "compute.autoscalers.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.diskTypes.get": + +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified disk type. Gets a list of available disk +// types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.autoscaler = autoscaler - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { - c.urlParams_.Set("requestId", requestId) + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { +func (c *DiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40618,7 +47693,7 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40630,14 +47705,22 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "diskType" // ], // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -40645,37 +47728,30 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.autoscalers.list": +// method id "compute.diskTypes.list": -type AutoscalersListCall struct { +type DiskTypesListCall struct { s *Service project string zone string @@ -40685,10 +47761,11 @@ type AutoscalersListCall struct { header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// zone. (== suppress_warning http-rest-shadowed ==) -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of disk types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c @@ -40698,36 +47775,37 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // filters resources listed in the response. 
The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -40737,22 +47815,23 @@ func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -40760,7 +47839,7 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40770,7 +47849,7 @@ func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -40778,23 +47857,23 @@ func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersListCall) Header() http.Header { +func (c *DiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40805,7 +47884,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -40819,14 +47898,14 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40845,7 +47924,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerList{ + ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40857,34 +47936,34 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of disk types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "id": "compute.diskTypes.list", // "parameterOrder": [ // "project", // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -40896,16 +47975,16 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/zones/{zone}/diskTypes", // "response": { - // "$ref": "AutoscalerList" + // "$ref": "DiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40919,7 +47998,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -40937,34 +48016,28 @@ func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) } } -// method id "compute.autoscalers.patch": +// method id "compute.disks.addResourcePolicies": -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksAddResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. (== -// suppress_warning http-rest-shadowed ==) -func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddResourcePolicies: Adds existing resource policies to a disk. You +// can only add one policy which will be applied to this disk for +// scheduling snapshot creation. +func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { + c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.disk = disk + c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest return c } @@ -40982,7 +48055,7 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { +func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -40990,7 +48063,7 @@ func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { +func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40998,38 +48071,38 @@ func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { +func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { +func (c *DisksAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -41037,18 +48110,19 @@ func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.patch" call. +// Do executes the "compute.disks.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41079,18 +48153,20 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", + // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + // "httpMethod": "POST", + // "id": "compute.disks.addResourcePolicies", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "disk" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -41106,16 +48182,16 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", // "request": { - // "$ref": "Autoscaler" + // "$ref": "DisksAddResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -41128,116 +48204,172 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.autoscalers.update": +// method id "compute.disks.aggregatedList": -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler return c } -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. 
-func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *DisksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *DisksAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *DisksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41256,7 +48388,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41268,74 +48400,113 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Retrieves an aggregated list of persistent disks.", + // "httpMethod": "GET", + // "id": "compute.disks.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/aggregated/disks", // "response": { - // "$ref": "Operation" + // "$ref": "DiskAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.addSignedUrlKey": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendBucketsAddSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend bucket. 
(== suppress_warning http-rest-shadowed ==) -func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { - c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.signedurlkey = signedurlkey + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Specifies to create an application consistent snapshot by informing +// the OS to prepare for the snapshot process. Currently only supported +// on Windows instances using the Volume Shadow Copy Service (VSS). +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -41353,7 +48524,7 @@ func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41361,7 +48532,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *Backend // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41369,36 +48540,36 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *Backen // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { +func (c *DisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -41406,20 +48577,21 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Do executes the "compute.disks.createSnapshot" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41450,20 +48622,27 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a snapshot of a specified persistent disk.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.addSignedUrlKey", + // "id": "compute.disks.createSnapshot", // "parameterOrder": [ // "project", - // "backendBucket" + // "zone", + // "disk" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "disk": { + // "description": "Name of the persistent disk to snapshot.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "guestFlush": { + // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -41475,11 +48654,18 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", // "request": { - // "$ref": "SignedUrlKey" + // "$ref": "Snapshot" // }, // "response": { // "$ref": "Operation" @@ -41492,23 +48678,28 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.backendBuckets.delete": +// method id "compute.disks.delete": -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. (== -// suppress_warning http-rest-shadowed ==) -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.zone = zone + c.disk = disk return c } @@ -41526,7 +48717,7 @@ func (r *BackendBucketsService) Delete(project string, backendBucket string) *Ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41534,7 +48725,7 @@ func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41542,23 +48733,23 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *DisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41566,7 +48757,7 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -41574,20 +48765,21 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. +// Do executes the "compute.disks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41618,18 +48810,18 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "id": "compute.disks.delete", // "parameterOrder": [ // "project", - // "backendBucket" + // "zone", + // "disk" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", + // "disk": { + // "description": "Name of the persistent disk to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -41644,9 +48836,16 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { // "$ref": "Operation" // }, @@ -41658,104 +48857,101 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.deleteSignedUrlKey": +// method id "compute.disks.get": -type BackendBucketsDeleteSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend bucket. 
(== suppress_warning http-rest-shadowed -// ==) -func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { - c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified persistent disk. Gets a list of available +// persistent disks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.urlParams_.Set("keyName", keyName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { +func (c *DisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41774,7 +48970,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41786,24 +48982,19 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "description": "Returns a specified persistent disk. 
Gets a list of available persistent disks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.disks.get", // "parameterOrder": [ // "project", - // "backendBucket", - // "keyName" + // "zone", + // "disk" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "disk": { + // "description": "Name of the persistent disk to return.", // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -41814,50 +49005,54 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { - // "$ref": "Operation" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.get": +// method id "compute.disks.getIamPolicy": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
+func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { + c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41867,7 +49062,7 @@ func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { +func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -41875,23 +49070,23 @@ func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsGetCall) Header() http.Header { +func (c *DisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41902,7 +49097,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -41910,20 +49105,21 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +// Do executes the "compute.disks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41942,7 +49138,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucket{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41954,32 +49150,40 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "id": "compute.disks.getIamPolicy", // "parameterOrder": [ // "project", - // "backendBucket" + // "zone", + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", // "response": { - // "$ref": "BackendBucket" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -41990,24 +49194,29 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } -// method id "compute.backendBuckets.insert": +// method id "compute.disks.insert": -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksInsertCall struct { + s *Service + project string + zone 
string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket + c.zone = zone + c.disk = disk return c } @@ -42025,15 +49234,22 @@ func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBuc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42041,36 +49257,36 @@ func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *DisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -42079,18 +49295,19 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. +// Do executes the "compute.disks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42121,11 +49338,12 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "id": "compute.disks.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "project": { @@ -42139,11 +49357,23 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/zones/{zone}/disks", // "request": { - // "$ref": "BackendBucket" + // "$ref": "Disk" // }, // "response": { // "$ref": "Operation" @@ -42156,22 +49386,25 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.list": +// method id "compute.disks.list": -type BackendBucketsListCall struct { +type DisksListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -42179,36 +49412,37 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -42218,22 +49452,23 @@ func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsLis // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -42241,7 +49476,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42251,7 +49486,7 @@ func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -42259,23 +49494,23 @@ func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsListCall) Header() http.Header { +func (c *DisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -42286,7 +49521,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -42295,18 +49530,19 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42325,7 +49561,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucketList{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42337,33 +49573,34 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of persistent disks contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "id": "compute.disks.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -42373,11 +49610,18 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/zones/{zone}/disks", // "response": { - // "$ref": "BackendBucketList" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42391,7 +49635,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. -func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -42409,27 +49653,26 @@ func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucke } } -// method id "compute.backendBuckets.patch": +// method id "compute.disks.removeResourcePolicies": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. (== -// suppress_warning http-rest-shadowed ==) -func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveResourcePolicies: Removes resource policies from a disk. +func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { + c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.zone = zone + c.disk = disk + c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest return c } @@ -42447,7 +49690,7 @@ func (r *BackendBucketsService) Patch(project string, backendBucket string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { +func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42455,7 +49698,7 @@ func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42463,57 +49706,58 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. +// Do executes the "compute.disks.removeResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42544,16 +49788,17 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "description": "Removes resource policies from a disk.", + // "httpMethod": "POST", + // "id": "compute.disks.removeResourcePolicies", // "parameterOrder": [ // "project", - // "backendBucket" + // "zone", + // "disk" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", + // "disk": { + // "description": "The disk name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -42570,187 +49815,18 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendBuckets.update": - -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified BackendBucket resource with the data -// included in the request. (== suppress_warning http-rest-shadowed ==) -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendBucketsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", // "request": { - // "$ref": "BackendBucket" + // "$ref": "DisksRemoveResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -42763,25 +49839,27 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.addSignedUrlKey": +// method id "compute.disks.resize": -type BackendServicesAddSignedUrlKeyCall struct { - s *Service - project string - backendService string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend service. (== suppress_warning http-rest-shadowed ==) -func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { - c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. 
+func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.signedurlkey = signedurlkey + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest return c } @@ -42799,7 +49877,7 @@ func (r *BackendServicesService) AddSignedUrlKey(project string, backendService // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42807,7 +49885,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *Backen // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42815,36 +49893,36 @@ func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { +func (c *DisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -42852,20 +49930,21 @@ func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Do executes the "compute.disks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42896,17 +49975,19 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend service. (== suppress_warning http-rest-shadowed ==)", + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", // "httpMethod": "POST", - // "id": "compute.backendServices.addSignedUrlKey", + // "id": "compute.disks.resize", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "disk": { + // "description": "The name of the persistent disk.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -42921,11 +50002,18 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "path": "{project}/zones/{zone}/disks/{disk}/resize", // "request": { - // "$ref": "SignedUrlKey" + // "$ref": "DisksResizeRequest" // }, // "response": { // "$ref": "Operation" @@ -42938,158 +50026,93 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.backendServices.aggregatedList": +// method id "compute.disks.setIamPolicy": -type BackendServicesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. (== -// suppress_warning http-rest-shadowed ==) -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { + c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { - c.ifNoneMatch_ = entityTag +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *DisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +// Do executes the "compute.disks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43108,7 +50131,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43120,96 +50143,73 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.disks.setIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Name of the project scoping this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/backendServices", + // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendServices.delete": +// method id "compute.disks.setLabels": -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendService resource. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. +func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.zone = zone + c.resource = resource + c.zonesetlabelsrequest = zonesetlabelsrequest return c } @@ -43227,7 +50227,7 @@ func (r *BackendServicesService) Delete(project string, backendService string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43235,7 +50235,7 @@ func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43243,52 +50243,58 @@ func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { +func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { +func (c *DisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.delete" call. +// Do executes the "compute.disks.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43319,21 +50325,15 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.disks.setLabels", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43345,9 +50345,26 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "request": { + // "$ref": "ZoneSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -43359,51 +50376,34 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.deleteSignedUrlKey": +// method id "compute.disks.testIamPermissions": -type BackendServicesDeleteSignedUrlKeyCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend service. 
(== suppress_warning -// http-rest-shadowed ==) -func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { - c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.urlParams_.Set("keyName", keyName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43411,31 +50411,36 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Ba // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { +func (c *DisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -43443,20 +50448,21 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.deleteSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43475,7 +50481,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43487,145 +50493,148 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend service. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendServices.deleteSignedUrlKey", + // "id": "compute.disks.testIamPermissions", // "parameterOrder": [ // "project", - // "backendService", - // "keyName" + // "zone", + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.get": +// method id "compute.externalVpnGateways.delete": -type BackendServicesGetCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysDeleteCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. 
(== suppress_warning http-rest-shadowed -// ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified externalVpnGateway. +func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { + c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.externalVpnGateway = externalVpnGateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { +func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { +func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesGetCall) Header() http.Header { +func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// Do executes the "compute.externalVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43644,7 +50653,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43656,18 +50665,18 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.backendServices.get", + // "description": "Deletes the specified externalVpnGateway.", + // "httpMethod": "DELETE", + // "id": "compute.externalVpnGateways.delete", // "parameterOrder": [ // "project", - // "backendService" + // "externalVpnGateway" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateways to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -43677,106 +50686,116 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", // "response": { - // "$ref": "BackendService" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendServices.getHealth": +// method id "compute.externalVpnGateways.get": -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysGetCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// BackendService. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified externalVpnGateway. Get a list of +// available externalVpnGateways by making a list() request. 
+func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { + c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.externalVpnGateway = externalVpnGateway return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { +func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { +func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetHealthCall) Header() http.Header { +func (c *ExternalVpnGatewaysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. 
-// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +} + +// Do executes the "compute.externalVpnGateways.get" call. +// Exactly one of *ExternalVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43795,7 +50814,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &ExternalVpnGateway{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43807,34 +50826,32 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Gets the most recent health check results for this BackendService. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", + // "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.externalVpnGateways.get", // "parameterOrder": [ // "project", - // "backendService" + // "externalVpnGateway" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateway to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "ExternalVpnGateway" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -43845,27 +50862,23 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.backendServices.insert": +// method id "compute.externalVpnGateways.insert": -type BackendServicesInsertCall struct { - s *Service - project string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysInsertCall struct { + s *Service + project string + 
externalvpngateway *ExternalVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendService resource in the specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ExternalVpnGateway in the specified project using +// the data included in the request. +func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { + c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice + c.externalvpngateway = externalvpngateway return c } @@ -43883,7 +50896,7 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { +func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43891,7 +50904,7 @@ func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43899,36 +50912,36 @@ func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesInsertCall) Header() http.Header { +func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -43941,14 +50954,14 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. +// Do executes the "compute.externalVpnGateways.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43979,9 +50992,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "id": "compute.externalVpnGateways.insert", // "parameterOrder": [ // "project" // ], @@ -43999,9 +51012,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/global/externalVpnGateways", // "request": { - // "$ref": "BackendService" + // "$ref": "ExternalVpnGateway" // }, // "response": { // "$ref": "Operation" @@ -44014,9 +51027,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.list": +// method id "compute.externalVpnGateways.list": -type BackendServicesListCall struct { +type ExternalVpnGatewaysListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -44025,11 +51038,10 @@ type BackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of BackendService resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of ExternalVpnGateway available to the +// specified project. +func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { + c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -44038,36 +51050,37 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { +// within parentheses. 
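// --- editorial sketch (not part of the upstream vendored file) ---
// The hunk above adds ExternalVpnGatewaysInsertCall. A minimal, hedged sketch
// of creating a gateway with an idempotency request ID; the ExternalVpnGateway
// struct fields (only Name is used here) are not shown in this hunk and are
// assumptions. Imports match the disks.setLabels sketch earlier in this hunk.
func createExternalVpnGateway(ctx context.Context, svc *compute.Service, project, requestID string) (*compute.Operation, error) {
	gw := &compute.ExternalVpnGateway{Name: "my-peer-gateway"} // interface/redundancy fields omitted in this sketch
	// RequestId lets the server ignore a retried request that has already completed.
	return svc.ExternalVpnGateways.Insert(project, gw).RequestId(requestID).Context(ctx).Do()
}
// --- end editorial sketch ---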
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -44077,22 +51090,23 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { +func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { +func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -44100,7 +51114,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44110,7 +51124,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { c.ifNoneMatch_ = entityTag return c } @@ -44118,23 +51132,23 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesListCall) Header() http.Header { +func (c *ExternalVpnGatewaysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44145,7 +51159,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -44158,14 +51172,14 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any +// Do executes the "compute.externalVpnGateways.list" call. +// Exactly one of *ExternalVpnGatewayList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was +// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44184,7 +51198,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &ExternalVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44196,33 +51210,33 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.externalVpnGateways.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -44234,9 +51248,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/global/externalVpnGateways", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "ExternalVpnGatewayList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -44250,7 +51264,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -44268,56 +51282,32 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": +// method id "compute.externalVpnGateways.setLabels": -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an ExternalVpnGateway. To learn more +// about labels, read the Labeling Resources documentation. +func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { + c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
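// --- editorial sketch (not part of the upstream vendored file) ---
// The hunk above adds ExternalVpnGatewaysListCall with Filter, MaxResults and
// the Pages helper. A hedged sketch of paginated listing; the Items and Name
// fields of ExternalVpnGatewayList/ExternalVpnGateway are assumptions not
// shown in this hunk. Imports match the disks.setLabels sketch above.
func listExternalVpnGateways(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.ExternalVpnGateways.List(project).
		Filter(`name != example-gateway`). // filter grammar as documented in the hunk above
		MaxResults(100)
	// Pages fetches every page; returning a non-nil error from the callback halts iteration.
	return call.Pages(ctx, func(page *compute.ExternalVpnGatewayList) error {
		for _, gw := range page.Items {
			log.Printf("external VPN gateway: %s", gw.Name)
		}
		return nil
	})
}
// --- end editorial sketch ---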
-func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44325,57 +51315,57 @@ func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesPatchCall) Header() http.Header { +func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. +// Do executes the "compute.externalVpnGateways.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44406,21 +51396,14 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. 
There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.setLabels", // "parameterOrder": [ // "project", - // "backendService" + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44428,15 +51411,17 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/global/externalVpnGateways/{resource}/setLabels", // "request": { - // "$ref": "BackendService" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -44449,51 +51434,32 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.setSecurityPolicy": +// method id "compute.externalVpnGateways.testIamPermissions": -type BackendServicesSetSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSecurityPolicy: Sets the security policy for the specified backend -// service. 
(== suppress_warning http-rest-shadowed ==) -func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { - c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { + c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44501,36 +51467,36 @@ func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
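A minimal usage sketch of the testIamPermissions surface added above, assuming imports of context and google.golang.org/api/compute/v1, an already-initialized *compute.Service (svc), and illustrative project, gateway, and permission names; none of these identifiers come from this patch, and the Permissions fields on the request/response types are assumed from the generated client's usual shape.

// Sketch only: ask which of the listed permissions the caller holds on a gateway.
func checkGatewayPermissions(ctx context.Context, svc *compute.Service) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		// Illustrative placeholder permission name.
		Permissions: []string{"compute.externalVpnGateways.get"},
	}
	resp, err := svc.ExternalVpnGateways.TestIamPermissions("my-project", "my-gateway", req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// resp.Permissions is assumed to hold the subset of permissions actually granted.
	return resp.Permissions, nil
}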
-func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -44538,20 +51504,20 @@ func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setSecurityPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.externalVpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44570,7 +51536,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44582,20 +51548,14 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the security policy for the specified backend service. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendServices.setSecurityPolicy", + // "id": "compute.externalVpnGateways.testIamPermissions", // "parameterOrder": [ // "project", - // "backendService" + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44603,50 +51563,47 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", // "request": { - // "$ref": "SecurityPolicyReference" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.update": +// method id "compute.firewalls.delete": -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified firewall. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.firewall = firewall return c } @@ -44664,7 +51621,7 @@ func (r *BackendServicesService) Update(project string, backendService string, b // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44672,7 +51629,7 @@ func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44680,57 +51637,52 @@ func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *FirewallsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. +// Do executes the "compute.firewalls.delete" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44761,16 +51713,16 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "description": "Deletes the specified firewall.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", // "parameterOrder": [ // "project", - // "backendService" + // "firewall" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "firewall": { + // "description": "Name of the firewall rule to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -44789,10 +51741,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/global/firewalls/{firewall}", // "response": { // "$ref": "Operation" // }, @@ -44804,93 +51753,31 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.diskTypes.aggregatedList": +// method id "compute.firewalls.get": -type DiskTypesAggregatedListCall struct { +type FirewallsGetCall struct { s *Service project string + firewall string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.firewall = firewall return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44900,7 +51787,7 @@ func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
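A hedged sketch of how the firewalls.get call introduced here is typically consumed with IfNoneMatch, assuming imports of context, google.golang.org/api/compute/v1, and google.golang.org/api/googleapi, plus an assumed initialized *compute.Service (svc), project/firewall names, and a previously cached ETag.

// Sketch only: revalidate a cached Firewall; googleapi.IsNotModified detects a 304.
func refreshFirewall(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.Firewall, error) {
	fw, err := svc.Firewalls.Get("my-project", "allow-ssh").
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The cached copy is still current; keep using it.
		return nil, nil
	}
	return fw, err
}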
-func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -44908,23 +51795,23 @@ func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *FirewallsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44935,7 +51822,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -44943,19 +51830,20 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44974,7 +51862,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeAggregatedList{ + ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44986,34 +51874,19 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified firewall.", // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "id": "compute.firewalls.get", // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -45024,9 +51897,9 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "type": "string" // } // }, - // "path": "{project}/aggregated/diskTypes", + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "Firewall" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45037,123 +51910,107 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.diskTypes.get": +// method id "compute.firewalls.insert": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsInsertCall struct { + s *Service + project string + firewall *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a firewall rule in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.diskType = diskType + c.firewall = firewall + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { +func (c *FirewallsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "diskType": c.diskType, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.firewalls.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45172,7 +52029,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45184,22 +52041,13 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.firewalls.insert", // "parameterOrder": [ - // "project", - // "zone", - // "diskType" + // "project" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45207,46 +52055,44 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "{project}/global/firewalls", + // "request": { + // "$ref": "Firewall" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.diskTypes.list": +// method id "compute.firewalls.list": -type DiskTypesListCall struct { +type FirewallsListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of disk types available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of firewall rules available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -45254,36 +52100,37 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -45293,22 +52140,23 @@ func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { +func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -45316,7 +52164,7 @@ func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
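A small sketch combining the Filter, OrderBy, and MaxResults parameters documented above, assuming imports of context and google.golang.org/api/compute/v1, an initialized *compute.Service (svc), and an illustrative project and filter value in the documented `name != example-instance` format.

// Sketch only: one page of firewall rules, newest first, excluding one rule by name.
func listRecentFirewalls(ctx context.Context, svc *compute.Service) (*compute.FirewallList, error) {
	return svc.Firewalls.List("my-project").
		Filter("name != example-firewall").
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Context(ctx).
		Do()
}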
-func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45326,7 +52174,7 @@ func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { c.ifNoneMatch_ = entityTag return c } @@ -45334,23 +52182,23 @@ func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesListCall) Header() http.Header { +func (c *FirewallsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45361,7 +52209,7 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -45370,19 +52218,18 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// *FirewallList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45401,7 +52248,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeList{ + ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45413,34 +52260,33 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of firewall rules available to the specified project.", // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "id": "compute.firewalls.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -45450,18 +52296,11 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes", + // "path": "{project}/global/firewalls", // "response": { - // "$ref": "DiskTypeList" + // "$ref": "FirewallList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45475,7 +52314,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
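A sketch of the Pages helper described just above, which drives pageToken/nextPageToken automatically; it assumes imports of context, fmt, and google.golang.org/api/compute/v1, an initialized *compute.Service (svc), an illustrative project name, and the generated client's usual Items field on FirewallList.

// Sketch only: visit every firewall rule across all result pages.
func printAllFirewalls(ctx context.Context, svc *compute.Service) error {
	return svc.Firewalls.List("my-project").Pages(ctx, func(page *compute.FirewallList) error {
		for _, fw := range page.Items { // Items assumed as the standard list payload field
			fmt.Println(fw.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
}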
-func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -45493,29 +52332,27 @@ func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) err } } -// method id "compute.disks.addResourcePolicies": +// method id "compute.firewalls.patch": -type DisksAddResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a disk. You -// can only add one policy which will be applied to this disk for -// scheduling snapshot creation. (== suppress_warning http-rest-shadowed -// ==) -func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { - c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -45533,7 +52370,7 @@ func (r *DisksService) AddResourcePolicies(project string, zone string, disk str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45541,7 +52378,7 @@ func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddReso // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45549,58 +52386,57 @@ func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddRes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
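A hedged sketch of the firewalls.patch call set up above, which per its description applies JSON merge-patch semantics (only the fields set on the request body change) and whose requestId parameter makes retries idempotent. It assumes imports of context and google.golang.org/api/compute/v1, an initialized *compute.Service (svc), illustrative project/firewall names, a caller-supplied UUID, and a Description field on Firewall (not shown in this hunk).

// Sketch only: partial update of a single field, safely retryable via requestId.
func updateFirewallDescription(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	patch := &compute.Firewall{Description: "updated description only"} // Description assumed for illustration
	return svc.Firewalls.Patch("my-project", "allow-ssh", patch).
		RequestId(requestID). // per the parameter doc, must be a valid, unique non-zero UUID
		Context(ctx).
		Do()
}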
-func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAddResourcePoliciesCall) Header() http.Header { +func (c *FirewallsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.addResourcePolicies" call. +// Do executes the "compute.firewalls.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45631,17 +52467,16 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.disks.addResourcePolicies", + // "description": "Updates the specified firewall rule with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.firewalls.patch", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "firewall" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "firewall": { + // "description": "Name of the firewall rule to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -45658,18 +52493,190 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.update": + +type FirewallsUpdateCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified firewall rule with the data included in +// the request. Note that all fields will be updated if using PUT, even +// fields that are not specified. To update individual fields, please +// use PATCH instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.firewalls.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. 
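// NOTE (editorial example, not part of the patch): compute.firewalls.update,
// added in the hunk above, uses PUT semantics, so any field left unset in the
// body is reset to its default; use firewalls.patch to change individual
// fields. A minimal sketch; the project/rule names and rule definition are
// placeholders, and svc is a *compute.Service built as in the earlier
// firewalls.patch sketch (compute.NewService with Application Default
// Credentials).
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// ReplaceAllowSSH overwrites the whole firewall rule via PUT.
func ReplaceAllowSSH(ctx context.Context, svc *compute.Service) error {
	rule := &compute.Firewall{
		Name:    "allow-ssh",
		Network: "global/networks/default",
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22"}},
		},
		SourceRanges: []string{"0.0.0.0/0"},
	}

	op, err := svc.Firewalls.Update("my-project", "allow-ssh", rule).Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("firewalls.update: %w", err)
	}
	fmt.Printf("operation %s is %s\n", op.Name, op.Status)
	return nil
}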
To update individual fields, please use PATCH instead.", + // "httpMethod": "PUT", + // "id": "compute.firewalls.update", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "path": "{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "DisksAddResourcePoliciesRequest" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -45682,9 +52689,9 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.disks.aggregatedList": +// method id "compute.forwardingRules.aggregatedList": -type DisksAggregatedListCall struct { +type ForwardingRulesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -45693,11 +52700,10 @@ type DisksAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of persistent disks. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of forwarding rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -45706,36 +52712,50 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. 
The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *ForwardingRulesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -45745,22 +52765,23 @@ func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -45768,7 +52789,7 @@ func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45778,7 +52799,7 @@ func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -45786,23 +52807,23 @@ func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregated // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
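// NOTE (editorial example, not part of the patch): the
// ForwardingRulesAggregatedListCall introduced above supports server-side
// filtering, the new includeAllScopes flag, and page iteration via Pages. A
// minimal sketch; the project ID and filter expression are placeholders, svc
// is a *compute.Service built as in the earlier firewalls.patch sketch, and
// the Items map keyed by scope follows the generated
// ForwardingRuleAggregatedList type.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// ListAllForwardingRules walks every page of the aggregated (all-scope) list.
func ListAllForwardingRules(ctx context.Context, svc *compute.Service) error {
	call := svc.ForwardingRules.AggregatedList("my-project").
		Filter(`loadBalancingScheme = "EXTERNAL"`). // placeholder filter
		IncludeAllScopes(false).
		MaxResults(100)

	return call.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, fr := range scoped.ForwardingRules {
				fmt.Printf("%s: %s -> %s\n", scope, fr.Name, fr.IPAddress)
			}
		}
		return nil // a non-nil error here would stop the iteration
	})
}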
-func (c *DisksAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45813,7 +52834,7 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -45825,15 +52846,15 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } - -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use + +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45852,7 +52873,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskAggregatedList{ + ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45864,33 +52885,38 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of forwarding rules.", // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -45902,9 +52928,9 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "type": "string" // } // }, - // "path": "{project}/aggregated/disks", + // "path": "{project}/aggregated/forwardingRules", // "response": { - // "$ref": "DiskAggregatedList" + // "$ref": "ForwardingRuleAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45918,7 +52944,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -45936,37 +52962,25 @@ func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggrega } } -// method id "compute.disks.createSnapshot": +// method id "compute.forwardingRules.delete": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesDeleteCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. 
-// (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": [Input Only] -// Specifies to create an application consistent snapshot by informing -// the OS to prepare for the snapshot process. Currently only supported -// on Windows instances using the Volume Shadow Copy Service (VSS). -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + c.region = region + c.forwardingRule = forwardingRule return c } @@ -45984,7 +52998,7 @@ func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapsh // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45992,7 +53006,7 @@ func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapsh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46000,58 +53014,53 @@ func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnaps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
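// NOTE (editorial example, not part of the patch): forwardingRules.delete
// returns an Operation, and the optional requestId makes retries idempotent:
// the server ignores a repeated request that carries the same ID. A minimal
// sketch; project, region and rule names are placeholders, the UUID is a
// stand-in that must be unique (and non-zero) per logical request, and svc is
// built as in the earlier firewalls.patch sketch.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// DeleteRuleIdempotently deletes a regional forwarding rule, tagging the call
// with a request ID so the server can ignore a retry it has already completed.
func DeleteRuleIdempotently(ctx context.Context, svc *compute.Service) error {
	const requestID = "3f2504e0-4f89-41d3-9a0c-0305e82c3301" // placeholder UUID

	op, err := svc.ForwardingRules.
		Delete("my-project", "us-central1", "my-forwarding-rule").
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return fmt.Errorf("forwardingRules.delete: %w", err)
	}
	fmt.Printf("operation %s is %s\n", op.Name, op.Status)
	return nil
}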
-func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *ForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. +// Do executes the "compute.forwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46082,27 +53091,22 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "description": "Deletes the specified ForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region", + // "forwardingRule" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "guestFlush": { - // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. 
Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46110,57 +53114,219 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.get": + +type ForwardingRulesGetCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
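// NOTE (editorial example, not part of the patch): ForwardingRulesGetCall
// supports conditional reads through IfNoneMatch, and googleapi.IsNotModified
// distinguishes a 304 response from a real failure, as the Do doc comment
// above describes. A minimal sketch; the names are placeholders, the ETag
// would normally come from a previous response, and svc is built as in the
// earlier firewalls.patch sketch.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// GetRuleIfChanged refetches a forwarding rule only if it has changed since
// the response that produced cachedETag.
func GetRuleIfChanged(ctx context.Context, svc *compute.Service, cachedETag string) error {
	fr, err := svc.ForwardingRules.
		Get("my-project", "us-central1", "my-forwarding-rule").
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("forwarding rule unchanged; keep using the cached copy")
		return nil
	case err != nil:
		return fmt.Errorf("forwardingRules.get: %w", err)
	default:
		fmt.Printf("got %s (%s)\n", fr.Name, fr.IPAddress)
		return nil
	}
}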
+func (c *ForwardingRulesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified ForwardingRule resource.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.get", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", - // "request": { - // "$ref": "Snapshot" - // }, + // "path": 
"{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.delete": +// method id "compute.forwardingRules.insert": -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesInsertCall struct { + s *Service + project string + region string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.region = region + c.forwardingrule = forwardingrule return c } @@ -46178,7 +53344,7 @@ func (r *DisksService) Delete(project string, zone string, disk string) *DisksDe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46186,7 +53352,7 @@ func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46194,53 +53360,57 @@ func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksDeleteCall) Header() http.Header { +func (c *ForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. +// Do executes the "compute.forwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46271,25 +53441,25 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.insert", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -46297,16 +53467,12 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/regions/{region}/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { // "$ref": "Operation" // }, @@ -46318,35 +53484,97 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.get": +// method id "compute.forwardingRules.list": -type DisksGetCall struct { +type ForwardingRulesListCall struct { s *Service project string - zone string - disk string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list +func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { + c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46356,7 +53584,7 @@ func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { +func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -46364,23 +53592,23 @@ func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetCall) Header() http.Header { +func (c *ForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46391,7 +53619,7 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -46400,20 +53628,19 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46432,7 +53659,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46444,20 +53671,35 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.disks.get", + // "id": "compute.forwardingRules.list", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -46467,17 +53709,17 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/regions/{region}/forwardingRules", // "response": { - // "$ref": "Disk" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46488,101 +53730,135 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.disks.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.forwardingRules.patch": -type DisksGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesPatchCall struct { + s *Service + project string + region string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { - c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. Currently, you can only +// patch the network_tier field. 
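// Editor's note: illustrative usage sketch only; it is not part of this
// vendored patch. It shows how the generated ForwardingRules.List call and
// its Pages helper above are typically consumed. The project and region
// values are hypothetical placeholders.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listForwardingRules(ctx context.Context) error {
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		return err
	}
	// Pages follows nextPageToken until the last page is reached.
	return svc.ForwardingRules.List("my-project", "us-central1").
		Pages(ctx, func(page *compute.ForwardingRuleList) error {
			for _, fr := range page.Items {
				fmt.Println(fr.Name, fr.IPAddress)
			}
			return nil // a non-nil error stops the iteration
		})
}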
+func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { + c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.region = region + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { +func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { +func (c *ForwardingRulesPatchCall) Context(ctx context.Context) *ForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksGetIamPolicyCall) Header() http.Header { +func (c *ForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.forwardingRules.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46601,7 +53877,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46613,74 +53889,79 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.disks.getIamPolicy", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + // "httpMethod": "PATCH", + // "id": "compute.forwardingRules.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "forwardingRule" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.insert": +// method id "compute.forwardingRules.setTarget": -type DisksInsertCall struct { - s *Service - project string - zone string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } @@ -46698,22 +53979,15 @@ func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksIns // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
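// Editor's note: illustrative usage sketch; not part of the vendored patch.
// It exercises the compute.forwardingRules.patch call added above, which per
// its description currently only supports patching the network tier. The
// project, region, rule name, and request ID below are hypothetical, and the
// NetworkTier field name is an assumption about the generated struct.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func patchNetworkTier(ctx context.Context) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	op, err := svc.ForwardingRules.
		Patch("my-project", "us-central1", "my-rule",
			&compute.ForwardingRule{NetworkTier: "STANDARD"}).
		// A caller-chosen, non-zero UUID lets the server deduplicate retries.
		RequestId("3b241101-e2bb-4255-8caf-4136c566a962").
		Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
	return nil
}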
-func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46721,36 +53995,36 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { +func (c *ForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -46758,20 +54032,21 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. +// Do executes the "compute.forwardingRules.setTarget" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46802,14 +54077,22 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "forwardingRule" // ], // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46817,27 +54100,22 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", // "request": { - // "$ref": "Disk" + // "$ref": "TargetReference" // }, // "response": { // "$ref": "Operation" @@ -46850,161 +54128,102 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.list": +// method id "compute.globalAddresses.delete": -type DisksListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalAddressesDeleteCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DisksListCall) Filter(filter string) *DisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.address = address return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
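// Editor's note: illustrative usage sketch; not part of the vendored patch.
// It drives the compute.forwardingRules.setTarget call added above; the API
// requires the new target to be of the same type as the old one. All
// identifiers and the target self-link below are hypothetical.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func retargetForwardingRule(ctx context.Context) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	ref := &compute.TargetReference{
		// Full self-link of the replacement target (same type as the current one).
		Target: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/targetPools/my-pool",
	}
	_, err = svc.ForwardingRules.
		SetTarget("my-project", "us-central1", "my-rule", ref).
		Do()
	return err
}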
-func (c *DisksListCall) Header() http.Header { +func (c *GlobalAddressesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.globalAddresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47023,7 +54242,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47035,35 +54254,19 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.disks.list", + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalAddresses.delete", // "parameterOrder": [ // "project", - // "zone" + // "address" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -47073,154 +54276,116 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.removeResourcePolicies": +// method id "compute.globalAddresses.get": -type DisksRemoveResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesGetCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a disk. (== -// suppress_warning http-rest-shadowed ==) -func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { - c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified address resource. Gets a list of available +// addresses by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get +func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { + c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
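// Editor's note: illustrative usage sketch; not part of the vendored patch.
// It calls the compute.globalAddresses.delete method shown above. Deletion is
// asynchronous: Do returns an Operation that callers typically poll or wait
// on. The project and address names are hypothetical.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func deleteGlobalAddress(ctx context.Context) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	op, err := svc.GlobalAddresses.Delete("my-project", "my-global-ip").
		// Optional idempotency token; must be a valid, non-zero UUID.
		RequestId("9f2c1d44-7a31-4f0e-9d2b-0c1a5e6b7f80").
		Do()
	if err != nil {
		return err
	}
	fmt.Println("delete operation:", op.Name, "status:", op.Status)
	return nil
}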
-func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.address = address return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { +func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { +func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *GlobalAddressesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.removeResourcePolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47239,7 +54404,7 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Address{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47251,17 +54416,16 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Removes resource policies from a disk. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.disks.removeResourcePolicies", + // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.get", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "address" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "address": { + // "description": "Name of the address resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -47273,56 +54437,39 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", - // "request": { - // "$ref": "DisksRemoveResourcePoliciesRequest" - // }, + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "Operation" + // "$ref": "Address" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.resize": +// method id "compute.globalAddresses.insert": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesInsertCall struct { + s *Service + project string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. You can only increase -// the size of the disk. (== suppress_warning http-rest-shadowed ==) -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an address resource in the specified project by using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest + c.address = address return c } @@ -47340,7 +54487,7 @@ func (r *DisksService) Resize(project string, zone string, disk string, disksres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47348,7 +54495,7 @@ func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47356,36 +54503,36 @@ func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
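// Editor's note: illustrative usage sketch; not part of the vendored patch.
// It shows the compute.globalAddresses.get call above together with the
// IfNoneMatch helper. Whether the Compute API actually returns a usable ETag
// for this resource is not asserted here; the etag value and names are
// hypothetical.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func getGlobalAddress(ctx context.Context, cachedETag string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	addr, err := svc.GlobalAddresses.Get("my-project", "my-global-ip").
		IfNoneMatch(cachedETag). // skip the body if the resource is unchanged
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("address unchanged since last fetch")
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Println(addr.Name, addr.Address)
	return nil
}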
-func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksResizeCall) Header() http.Header { +func (c *GlobalAddressesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -47394,20 +54541,18 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. +// Do executes the "compute.globalAddresses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47438,22 +54583,13 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk. You can only increase the size of the disk. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates an address resource in the specified project by using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "id": "compute.globalAddresses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47465,18 +54601,11 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "path": "{project}/global/addresses", // "request": { - // "$ref": "DisksResizeRequest" + // "$ref": "Address" // }, // "response": { // "$ref": "Operation" @@ -47489,94 +54618,159 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.setIamPolicy": +// method id "compute.globalAddresses.list": -type DisksSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) -func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { - c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of global addresses. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { +func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { +func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetIamPolicyCall) Header() http.Header { +func (c *GlobalAddressesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.globalAddresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47595,7 +54789,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47607,74 +54801,95 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.disks.setIamPolicy", + // "description": "Retrieves a list of global addresses.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.list", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "{project}/global/addresses", // "response": { - // "$ref": "Policy" + // "$ref": "AddressList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setLabels": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalForwardingRules.delete": + +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. (== suppress_warning -// http-rest-shadowed ==) -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified GlobalForwardingRule resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest + c.forwardingRule = forwardingRule return c } @@ -47692,7 +54907,7 @@ func (r *DisksService) SetLabels(project string, zone string, resource string, z // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47700,7 +54915,7 @@ func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47708,58 +54923,52 @@ func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetLabelsCall) Header() http.Header { +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setLabels" call. +// Do executes the "compute.globalForwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47790,15 +54999,21 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.disks.setLabels", + // "description": "Deletes the specified GlobalForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalForwardingRules.delete", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "forwardingRule" // ], // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47810,26 +55025,9 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", - // "request": { - // "$ref": "ZoneSetLabelsRequest" - // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { // "$ref": "Operation" // }, @@ -47841,93 +55039,98 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.disks.testIamPermissions": +// method id "compute.globalForwardingRules.get": -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesGetCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified GlobalForwardingRule resource. Gets a list +// of available forwarding rules by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get +func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { + c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *GlobalForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47946,7 +55149,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47958,43 +55161,32 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "forwardingRule" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48005,23 +55197,24 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } -// method id "compute.externalVpnGateways.delete": +// method id "compute.globalForwardingRules.insert": -type ExternalVpnGatewaysDeleteCall struct { - s *Service - project string - externalVpnGateway string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified externalVpnGateway. (== -// suppress_warning http-rest-shadowed ==) -func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { - c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a GlobalForwardingRule resource in the specified +// project using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.externalVpnGateway = externalVpnGateway + c.forwardingrule = forwardingrule return c } @@ -48039,7 +55232,7 @@ func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48047,7 +55240,7 @@ func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48055,52 +55248,56 @@ func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "externalVpnGateway": c.externalVpnGateway, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.delete" call. +// Do executes the "compute.globalForwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48131,21 +55328,13 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Deletes the specified externalVpnGateway. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.externalVpnGateways.delete", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ - // "project", - // "externalVpnGateway" + // "project" // ], // "parameters": { - // "externalVpnGateway": { - // "description": "Name of the externalVpnGateways to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48159,7 +55348,10 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "path": "{project}/global/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { // "$ref": "Operation" // }, @@ -48171,32 +55363,95 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.externalVpnGateways.get": +// method id "compute.globalForwardingRules.list": -type ExternalVpnGatewaysGetCall struct { - s *Service - project string - externalVpnGateway string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified externalVpnGateway. Get a list of -// available externalVpnGateways by making a list() request. 
(== -// suppress_warning http-rest-shadowed ==) -func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { - c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of GlobalForwardingRule resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.externalVpnGateway = externalVpnGateway + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. 
+func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { +func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48206,7 +55461,7 @@ func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { +func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -48214,23 +55469,23 @@ func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { +func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysGetCall) Header() http.Header { +func (c *GlobalForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48241,7 +55496,7 @@ func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -48249,20 +55504,19 @@ func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "externalVpnGateway": c.externalVpnGateway, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.get" call. 
-// Exactly one of *ExternalVpnGateway or error will be non-nil. Any +// Do executes the "compute.globalForwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ExternalVpnGateway.ServerResponse.Header or (if a response was +// *ForwardingRuleList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48281,7 +55535,7 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ExternalVpnGateway{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48293,19 +55547,34 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External } return ret, nil // { - // "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.externalVpnGateways.get", + // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ - // "project", - // "externalVpnGateway" + // "project" // ], // "parameters": { - // "externalVpnGateway": { - // "description": "Name of the externalVpnGateway to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -48316,9 +55585,9 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External // "type": "string" // } // }, - // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "path": "{project}/global/forwardingRules", // "response": { - // "$ref": "ExternalVpnGateway" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48329,24 +55598,48 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External } -// method id "compute.externalVpnGateways.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ExternalVpnGatewaysInsertCall struct { - s *Service - project string - externalvpngateway *ExternalVpnGateway - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalForwardingRules.patch": + +type GlobalForwardingRulesPatchCall struct { + s *Service + project string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ExternalVpnGateway in the specified project using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { - c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. Currently, you can only +// patch the network_tier field. 
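// Illustrative sketch (an assumed, hand-written helper, not part of the
// generated client): the usual way callers drive the list call defined
// above. Filter and MaxResults shape the request, and Pages walks
// nextPageToken for the caller, invoking the callback once per
// ForwardingRuleList page until the token comes back empty.
func listGlobalForwardingRulesExample(ctx context.Context, svc *Service, project string) error {
	return svc.GlobalForwardingRules.List(project).
		Filter("name != example-rule").
		MaxResults(100).
		Pages(ctx, func(page *ForwardingRuleList) error {
			for _, fr := range page.Items {
				fmt.Println(fr.Name, fr.Target)
			}
			return nil
		})
}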
+func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { + c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.externalvpngateway = externalvpngateway + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule return c } @@ -48364,7 +55657,7 @@ func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { +func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48372,7 +55665,7 @@ func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { +func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48380,56 +55673,57 @@ func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { +func (c *GlobalForwardingRulesPatchCall) Context(ctx context.Context) *GlobalForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { +func (c *GlobalForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.insert" call. +// Do executes the "compute.globalForwardingRules.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48460,13 +55754,21 @@ func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.externalVpnGateways.insert", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + // "httpMethod": "PATCH", + // "id": "compute.globalForwardingRules.patch", // "parameterOrder": [ - // "project" + // "project", + // "forwardingRule" // ], // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48480,9 +55782,9 @@ func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "{project}/global/externalVpnGateways", + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "request": { - // "$ref": "ExternalVpnGateway" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -48495,157 +55797,110 @@ func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.externalVpnGateways.list": +// method id "compute.globalForwardingRules.setTarget": -type ExternalVpnGatewaysListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of ExternalVpnGateway available to the -// specified project. 
(== suppress_warning http-rest-shadowed ==) -func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { - c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for the GlobalForwardingRule resource. +// The new target should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. 
+// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
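// Illustrative sketch (an assumed, hand-written helper, not part of the
// generated client): the mutation pattern used by SetTarget and the other
// write calls above. The caller supplies a TargetReference plus an optional
// requestID so that a retried request carrying the same ID is ignored rather
// than re-applied; Do returns an *Operation which the caller is expected to
// poll (for example with GlobalOperations.Get) until its Status is "DONE".
func setGlobalForwardingRuleTargetExample(ctx context.Context, svc *Service, project, rule, targetURL, requestID string) (*Operation, error) {
	return svc.GlobalForwardingRules.
		SetTarget(project, rule, &TargetReference{Target: targetURL}).
		RequestId(requestID).
		Context(ctx).
		Do()
}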
-func (c *ExternalVpnGatewaysListCall) Header() http.Header { +func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.list" call. -// Exactly one of *ExternalVpnGatewayList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { +// Do executes the "compute.globalForwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48664,7 +55919,7 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ExternalVpnGatewayList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48676,34 +55931,19 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa } return ret, nil // { - // "description": "Retrieves the list of ExternalVpnGateway available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.externalVpnGateways.list", + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", // "parameterOrder": [ - // "project" + // "project", + // "forwardingRule" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -48712,69 +55952,73 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/externalVpnGateways", + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "ExternalVpnGatewayList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.externalVpnGateways.setLabels": +// method id "compute.globalNetworkEndpointGroups.attachNetworkEndpoints": -type ExternalVpnGatewaysSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an ExternalVpnGateway. To learn more -// about labels, read the Labeling Resources documentation. 
(== -// suppress_warning http-rest-shadowed ==) -func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { - c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachNetworkEndpoints: Attach a network endpoint to the specified +// network endpoint group. +func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.networkEndpointGroup = networkEndpointGroup + c.globalnetworkendpointgroupsattachendpointsrequest = globalnetworkendpointgroupsattachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48782,36 +56026,36 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *Externa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsattachendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48819,20 +56063,20 @@ func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.setLabels" call. +// Do executes the "compute.globalNetworkEndpointGroups.attachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48863,14 +56107,20 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + // "description": "Attach a network endpoint to the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.externalVpnGateways.setLabels", + // "id": "compute.globalNetworkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ // "project", - // "resource" + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48878,17 +56128,15 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/externalVpnGateways/{resource}/setLabels", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "GlobalNetworkEndpointGroupsAttachEndpointsRequest" // }, // "response": { // "$ref": "Operation" @@ -48901,32 +56149,49 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.externalVpnGateways.testIamPermissions": +// method id "compute.globalNetworkEndpointGroups.delete": -type ExternalVpnGatewaysTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsDeleteCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { - c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group.Note that the +// NEG cannot be deleted if there are backend services referencing it. +func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { + c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48934,57 +56199,52 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.globalNetworkEndpointGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49003,7 +56263,7 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49015,14 +56275,20 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.externalVpnGateways.testIamPermissions", + // "description": "Deletes the specified network endpoint group.Note that the NEG cannot be deleted if there are backend services referencing it.", + // "httpMethod": "DELETE", + // "id": "compute.globalNetworkEndpointGroups.delete", // "parameterOrder": [ // "project", - // "resource" + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -49030,48 +56296,43 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOpt // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.delete": +// method id "compute.globalNetworkEndpointGroups.detachNetworkEndpoints": -type FirewallsDeleteCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified firewall. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach the network endpoint from the +// specified network endpoint group. +func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.networkEndpointGroup = networkEndpointGroup + c.globalnetworkendpointgroupsdetachendpointsrequest = globalnetworkendpointgroupsdetachendpointsrequest return c } @@ -49089,7 +56350,7 @@ func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDel // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49097,7 +56358,7 @@ func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49105,52 +56366,57 @@ func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsDeleteCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.delete" call. +// Do executes the "compute.globalNetworkEndpointGroups.detachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49181,18 +56447,17 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", + // "description": "Detach the network endpoint from the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.globalNetworkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", - // "firewall" + // "networkEndpointGroup" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -49209,7 +56474,10 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "GlobalNetworkEndpointGroupsDetachEndpointsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -49221,32 +56489,31 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.get": +// method id "compute.globalNetworkEndpointGroups.get": -type FirewallsGetCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsGetCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified firewall. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { + c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { +func (c *GlobalNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49256,7 +56523,7 @@ func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { +func (c *GlobalNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -49264,23 +56531,23 @@ func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { +func (c *GlobalNetworkEndpointGroupsGetCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsGetCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49291,7 +56558,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -49299,20 +56566,20 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { +// Do executes the "compute.globalNetworkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49331,7 +56598,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Firewall{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49343,18 +56610,17 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.firewalls.get", + // "id": "compute.globalNetworkEndpointGroups.get", // "parameterOrder": [ // "project", - // "firewall" + // "networkEndpointGroup" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -49366,9 +56632,9 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Firewall" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49379,25 +56645,23 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } -// method id "compute.firewalls.insert": +// method id "compute.globalNetworkEndpointGroups.insert": -type FirewallsInsertCall struct { - s *Service - project string - firewall *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsInsertCall struct { + s *Service + project string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. (== suppress_warning http-rest-shadowed -// ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert -func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *GlobalNetworkEndpointGroupsService) Insert(project string, networkendpointgroup *NetworkEndpointGroup) *GlobalNetworkEndpointGroupsInsertCall { + c := &GlobalNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.networkendpointgroup = networkendpointgroup return c } @@ -49415,7 +56679,7 @@ func (r *FirewallsService) Insert(project string, firewall *Firewall) *Firewalls // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49423,7 +56687,7 @@ func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49431,36 +56695,36 @@ func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsInsertCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -49473,14 +56737,14 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.insert" call. +// Do executes the "compute.globalNetworkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49511,9 +56775,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.firewalls.insert", + // "id": "compute.globalNetworkEndpointGroups.insert", // "parameterOrder": [ // "project" // ], @@ -49531,9 +56795,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/networkEndpointGroups", // "request": { - // "$ref": "Firewall" + // "$ref": "NetworkEndpointGroup" // }, // "response": { // "$ref": "Operation" @@ -49546,9 +56810,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.list": +// method id "compute.globalNetworkEndpointGroups.list": -type FirewallsListCall struct { +type GlobalNetworkEndpointGroupsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -49557,11 +56821,10 @@ type FirewallsListCall struct { header_ http.Header } -// List: Retrieves the list of firewall rules available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project. +func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetworkEndpointGroupsListCall { + c := &GlobalNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -49570,36 +56833,37 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalNetworkEndpointGroupsListCall) Filter(filter string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -49609,22 +56873,23 @@ func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -49632,7 +56897,7 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49642,7 +56907,7 @@ func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -49650,23 +56915,23 @@ func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsListCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49677,7 +56942,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -49690,14 +56955,14 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { +// Do executes the "compute.globalNetworkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49716,7 +56981,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &FirewallList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49728,33 +56993,33 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } return ret, nil // { - // "description": "Retrieves the list of firewall rules available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project.", // "httpMethod": "GET", - // "id": "compute.firewalls.list", + // "id": "compute.globalNetworkEndpointGroups.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -49766,9 +57031,9 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/networkEndpointGroups", // "response": { - // "$ref": "FirewallList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49782,7 +57047,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { +func (c *GlobalNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -49800,233 +57065,95 @@ func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) err } } -// method id "compute.firewalls.patch": +// method id "compute.globalNetworkEndpointGroups.listNetworkEndpoints": -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified firewall rule with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. +func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.networkEndpointGroup = networkEndpointGroup return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
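Usage note: the generated list call above follows the client's standard builder pattern: construct the call from the service, chain the optional setters (Filter, MaxResults, OrderBy, PageToken), then call Do for a single page or Pages to follow nextPageToken automatically. A minimal sketch in Go, assuming the client is vendored as google.golang.org/api/compute/v1, that the service exposes the usual generated accessor svc.GlobalNetworkEndpointGroups.List, that Application Default Credentials are available, and that "my-project" and the filter string are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/compute/v1" // assumed vendor import path
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Chain the optional parameters documented above; Pages then follows
	// nextPageToken until it comes back empty.
	call := svc.GlobalNetworkEndpointGroups.List("my-project").
		Filter("name != example-neg"). // placeholder filter, same form as the docs' example
		MaxResults(100).
		OrderBy("creationTimestamp desc")

	if err := call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
		for _, neg := range page.Items {
			fmt.Println(neg.Name)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}

Pages restores the original page token when it returns (note the deferred PageToken call in the generated method), so the configured call value can be reused afterwards.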
-func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { - c.ctx_ = ctx +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", - // "parameterOrder": [ - // "project", - // "firewall" - // ], - // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.firewalls.update": - -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified firewall rule with the data included in -// the request. Note that all fields will be updated if using PUT, even -// fields that are not specified. To update individual fields, please -// use PATCH instead. 
(== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - c.firewall2 = firewall2 +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { - c.urlParams_.Set("requestId", requestId) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50034,57 +57161,54 @@ func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
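The PageToken and MaxResults setters above can also be driven by hand when the Pages helper is not wanted. A sketch, assuming a *compute.Service built as in the previous sketch and the usual generated list-response fields (Items, NextPageToken):

// countGlobalNEGEndpoints pages through one global NEG manually: it sets
// pageToken to the previous response's nextPageToken until the token is
// empty, exactly as the PageToken documentation above describes.
func countGlobalNEGEndpoints(ctx context.Context, svc *compute.Service, project, neg string) (int, error) {
	total := 0
	pageToken := ""
	for {
		call := svc.GlobalNetworkEndpointGroups.ListNetworkEndpoints(project, neg).
			Context(ctx).
			MaxResults(500)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return total, err
		}
		total += len(resp.Items)
		if resp.NextPageToken == "" {
			return total, nil
		}
		pageToken = resp.NextPageToken
	}
}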
-func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsUpdateCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalNetworkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50103,7 +57227,7 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50115,52 +57239,88 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. To update individual fields, please use PATCH instead. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.firewalls.update", + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.globalNetworkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ // "project", - // "firewall" + // "networkEndpointGroup" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ForwardingRulesAggregatedListCall struct { +// method id "compute.globalOperations.aggregatedList": + +type GlobalOperationsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -50169,11 +57329,10 @@ type ForwardingRulesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of forwarding rules. 
(== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of all operations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -50182,36 +57341,50 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. 
For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *GlobalOperationsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -50221,22 +57394,23 @@ func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *Forwar // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -50244,7 +57418,7 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
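An aggregated list differs from the flat lists above in that each page groups results per scope. A sketch of iterating it, assuming the conventional aggregated-list shape in which Items is a map from scope name (global, regions/..., zones/...) to a scoped list carrying an Operations slice, plus the same service value and imports as the earlier sketches:

// printRecentOperations walks every scope returned by the aggregated list,
// newest operations first per the orderBy documentation above.
func printRecentOperations(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.GlobalOperations.AggregatedList(project).
		IncludeAllScopes(true).
		OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *compute.OperationAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, op := range scoped.Operations {
				fmt.Printf("%s\t%s\t%s\n", scope, op.Name, op.Status)
			}
		}
		return nil
	})
}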
-func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50254,7 +57428,7 @@ func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *Forwar // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -50262,23 +57436,23 @@ func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *Forwa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50289,7 +57463,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -50302,14 +57476,14 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
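The IfNoneMatch setter and the googleapi.IsNotModified check documented above combine into a conditional-refresh pattern: send the ETag of the previous response and treat a 304 as "nothing changed". A sketch, assuming the backend actually emits an Etag header for this resource and that it can be read from the response's embedded ServerResponse header; it also needs google.golang.org/api/googleapi imported:

// refreshOperations re-fetches the aggregated list only if it changed on the
// server since prev was retrieved; on a 304 it simply returns prev again.
func refreshOperations(ctx context.Context, svc *compute.Service, project string,
	prev *compute.OperationAggregatedList) (*compute.OperationAggregatedList, error) {

	call := svc.GlobalOperations.AggregatedList(project).Context(ctx)
	if prev != nil {
		if etag := prev.ServerResponse.Header.Get("Etag"); etag != "" {
			call = call.IfNoneMatch(etag) // sent as an If-None-Match request header
		}
	}
	next, err := call.Do()
	if googleapi.IsNotModified(err) {
		return prev, nil // server responded with http.StatusNotModified
	}
	if err != nil {
		return nil, err
	}
	return next, nil
}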
-func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50328,7 +57502,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleAggregatedList{ + ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50340,33 +57514,38 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F } return ret, nil // { - // "description": "Retrieves an aggregated list of forwarding rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of all operations.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", + // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -50378,9 +57557,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "type": "string" // } // }, - // "path": "{project}/aggregated/forwardingRules", + // "path": "{project}/aggregated/operations", // "response": { - // "$ref": "ForwardingRuleAggregatedList" + // "$ref": "OperationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50394,7 +57573,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -50412,106 +57591,212 @@ func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*F } } -// method id "compute.forwardingRules.delete": +// method id "compute.globalOperations.delete": -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete -func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the specified Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalOperations.delete", + // "parameterOrder": [ + // "project", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/operations/{operation}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalOperations.get": + +type GlobalOperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified Operations resource. Gets a list of +// operations by making a `list()` request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesDeleteCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.delete" call. +// Do executes the "compute.globalOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
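The globalOperations Get and Delete calls pair naturally: Get reports the operation's current state, and Delete (whose generated Do returns only an error, since the API sends no response body) removes the finished record. A sketch, assuming the usual Operation fields and status values ("PENDING", "RUNNING", "DONE") and the same service value as the earlier sketches:

// cleanupOperation deletes a global operation record, but only once the
// operation has actually finished.
func cleanupOperation(ctx context.Context, svc *compute.Service, project, opName string) error {
	op, err := svc.GlobalOperations.Get(project, opName).Context(ctx).Do()
	if err != nil {
		return err
	}
	if op.Status != "DONE" {
		return fmt.Errorf("operation %s is still %s; not deleting", op.Name, op.Status)
	}
	return svc.GlobalOperations.Delete(project, opName).Context(ctx).Do()
}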
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50542,17 +57827,16 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", + // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a `list()` request.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "operation" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -50564,60 +57848,110 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.get": +// method id "compute.globalOperations.list": -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalOperationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified ForwardingRule resource. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50627,7 +57961,7 @@ func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -50635,23 +57969,23 @@ func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ForwardingRulesGetCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50662,7 +57996,7 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -50670,21 +58004,19 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned +// *OperationList.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50703,7 +58035,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50715,20 +58047,34 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } return ret, nil // { - // "description": "Returns the specified ForwardingRule resource. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of Operation resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", + // "id": "compute.globalOperations.list", // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" + // "project" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -50737,18 +58083,11 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/operations", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50759,53 +58098,63 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } -// method id "compute.forwardingRules.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.globalOperations.wait": -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsWaitCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert -func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. +// +// This method is called on a best-effort basis. Specifically: +// - In uncommon cases, when the server is overloaded, the request might +// return before the default deadline is reached, or might return after +// zero seconds. +// - If the default deadline is reached, there is no guarantee that the +// operation is actually done when the method returns. Be prepared to +// retry if the operation is not `DONE`. 
+func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { + c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingrule = forwardingrule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { +func (c *GlobalOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOperationsWaitCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50813,36 +58162,31 @@ func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { +func (c *GlobalOperationsWaitCall) Context(ctx context.Context) *GlobalOperationsWaitCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesInsertCall) Header() http.Header { +func (c *GlobalOperationsWaitCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}/wait") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50850,20 +58194,20 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.insert" call. +// Do executes the "compute.globalOperations.wait" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50894,69 +58238,58 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", + // "id": "compute.globalOperations.wait", // "parameterOrder": [ // "project", - // "region" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", - // "request": { - // "$ref": "ForwardingRule" - // }, + // "path": "{project}/global/operations/{operation}/wait", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.list": +// method id "compute.healthChecks.aggregatedList": -type ForwardingRulesListCall struct { +type HealthChecksAggregatedListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. (== suppress_warning http-rest-shadowed -// ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all HealthCheck resources, +// regional and global, available to the specified project. +func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { + c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -50964,36 +58297,50 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *HealthChecksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *HealthChecksAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -51003,22 +58350,23 @@ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
-func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { +func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { +func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -51026,7 +58374,7 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { +func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51036,7 +58384,7 @@ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { +func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -51044,23 +58392,23 @@ func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { +func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesListCall) Header() http.Header { +func (c *HealthChecksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51071,7 +58419,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -51080,19 +58428,18 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.aggregatedList" call. +// Exactly one of *HealthChecksAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *HealthChecksAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51111,7 +58458,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51123,55 +58470,52 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", + // "id": "compute.healthChecks.aggregatedList", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/aggregated/healthChecks", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "HealthChecksAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51185,7 +58529,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -51203,213 +58547,22 @@ func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingR } } -// method id "compute.forwardingRules.setTarget": - -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", - // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalAddresses.delete": +// method id "compute.healthChecks.delete": -type GlobalAddressesDeleteCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. 
(== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.healthCheck = healthCheck return c } @@ -51427,7 +58580,7 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51435,7 +58588,7 @@ func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51443,23 +58596,23 @@ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesDeleteCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51467,7 +58620,7 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -51475,20 +58628,20 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.delete" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51519,16 +58672,16 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified HealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ // "project", - // "address" + // "healthCheck" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -51547,7 +58700,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -51559,33 +58712,31 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.get": +// method id "compute.healthChecks.get": -type GlobalAddressesGetCall struct { +type HealthChecksGetCall struct { s *Service project string - address string + healthCheck string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified address resource. Gets a list of available -// addresses by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. 
+func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51595,7 +58746,7 @@ func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -51603,23 +58754,23 @@ func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesGetCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51630,7 +58781,7 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -51638,20 +58789,20 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51670,7 +58821,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51682,16 +58833,16 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "address" + // "healthCheck" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -51705,9 +58856,9 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "Address" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51718,25 +58869,23 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } -// method id "compute.globalAddresses.insert": +// method id "compute.healthChecks.insert": -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project by using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. 
+func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.healthcheck = healthcheck return c } @@ -51754,7 +58903,7 @@ func (r *GlobalAddressesService) Insert(project string, address *Address) *Globa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51762,7 +58911,7 @@ func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51770,36 +58919,36 @@ func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesInsertCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -51812,14 +58961,14 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.insert" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51850,9 +58999,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -51870,9 +59019,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/global/healthChecks", // "request": { - // "$ref": "Address" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -51885,9 +59034,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.list": +// method id "compute.healthChecks.list": -type GlobalAddressesListCall struct { +type HealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -51896,11 +59045,10 @@ type GlobalAddressesListCall struct { header_ http.Header } -// List: Retrieves a list of global addresses. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -51909,36 +59057,37 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. 
+// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -51948,22 +59097,23 @@ func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -51971,7 +59121,7 @@ func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51981,7 +59131,7 @@ func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -51989,23 +59139,23 @@ func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddresses // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesListCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52016,7 +59166,7 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -52029,14 +59179,14 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52055,7 +59205,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52067,33 +59217,33 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", + // "id": "compute.healthChecks.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -52105,9 +59255,9 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "AddressList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52121,7 +59271,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -52139,24 +59289,26 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } -// method id "compute.globalForwardingRules.delete": +// method id "compute.healthChecks.patch": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete -func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -52174,7 +59326,7 @@ func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52182,7 +59334,7 @@ func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52190,52 +59342,57 @@ func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. +// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52266,16 +59423,16 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified GlobalForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ // "project", - // "forwardingRule" + // "healthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -52294,7 +59451,10 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/global/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -52306,99 +59466,109 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.get": +// method id "compute.healthChecks.update": -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified GlobalForwardingRule resource. Gets a list -// of available forwarding rules by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { +func (c *HealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52417,7 +59587,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52429,16 +59599,16 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.healthChecks.update", // "parameterOrder": [ // "project", - // "forwardingRule" + // "healthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -52450,40 +59620,45 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/global/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { - // "$ref": "ForwardingRule" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalForwardingRules.insert": +// method id "compute.httpHealthChecks.delete": -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert -func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingrule = forwardingrule + c.httpHealthCheck = httpHealthCheck return c } @@ -52501,7 +59676,7 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52509,7 +59684,7 @@ func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52517,56 +59692,52 @@ func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesInsertCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.insert" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52597,13 +59768,21 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", + // "description": "Deletes the specified HttpHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ - // "project" + // "project", + // "httpHealthCheck" // ], // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -52617,10 +59796,7 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", - // "request": { - // "$ref": "ForwardingRule" - // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -52632,93 +59808,32 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.list": +// method id "compute.httpHealthChecks.get": -type GlobalForwardingRulesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of GlobalForwardingRule resources available to -// the specified project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Gets a list of +// available HTTP health checks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52728,7 +59843,7 @@ func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForw // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -52736,23 +59851,23 @@ func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesListCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52763,7 +59878,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -52771,19 +59886,20 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52802,7 +59918,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52814,34 +59930,19 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa } return ret, nil // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified HttpHealthCheck resource. 
Gets a list of available HTTP health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ - // "project" + // "project", + // "httpHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -52852,61 +59953,37 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", // "https://www.googleapis.com/auth/compute.readonly" // ] - // } - -} + // } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } } -// method id "compute.globalForwardingRules.setTarget": +// method id "compute.httpHealthChecks.insert": -type GlobalForwardingRulesSetTargetCall struct { +type HttpHealthChecksInsertCall struct { s *Service project string - forwardingRule string - targetreference *TargetReference + httphealthcheck *HttpHealthCheck urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.httphealthcheck = httphealthcheck return c } @@ -52924,7 +60001,7 @@ func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
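// A sketch of reusing one requestId across retries of an insert, per the
// request-ID semantics described above: the server deduplicates repeated
// requests carrying the same ID. The caller supplies requestID (e.g. a UUID);
// the blind three-attempt loop is a simplification for illustration.
func exampleInsertWithRetry(ctx context.Context, svc *Service, project, requestID string, hc *HttpHealthCheck) (*Operation, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		op, err := svc.HttpHealthChecks.Insert(project, hc).
			RequestId(requestID). // same ID on every attempt
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
		lastErr = err
	}
	return nil, lastErr
}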
-func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52932,7 +60009,7 @@ func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52940,36 +60017,36 @@ func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -52977,20 +60054,19 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setTarget" call. +// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
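// A sketch of executing the insert's Do and inspecting the returned *Operation;
// the HttpHealthCheck and Operation field names (Name, Port, RequestPath,
// Status) are assumed from the generated types in this package.
func exampleInsertAndInspect(ctx context.Context, svc *Service, project string) error {
	op, err := svc.HttpHealthChecks.Insert(project, &HttpHealthCheck{
		Name:        "example-check", // hypothetical resource name
		Port:        80,
		RequestPath: "/healthz",
	}).Context(ctx).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok {
			return fmt.Errorf("insert failed with HTTP %d: %v", gerr.Code, gerr)
		}
		return err
	}
	fmt.Printf("started operation %s (status %s)\n", op.Name, op.Status)
	return nil
}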
-func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53021,21 +60097,13 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ - // "project", - // "forwardingRule" + // "project" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53049,9 +60117,9 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "TargetReference" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -53064,9 +60132,9 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalOperations.aggregatedList": +// method id "compute.httpHealthChecks.list": -type GlobalOperationsAggregatedListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -53075,11 +60143,11 @@ type GlobalOperationsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -53088,36 +60156,37 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -53127,22 +60196,23 @@ func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *Globa // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. 
// -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -53150,7 +60220,7 @@ func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53160,7 +60230,7 @@ func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *Globa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -53168,23 +60238,23 @@ func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *Glob // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
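// A single-page List sketch combining the optional parameters documented
// above (Filter, OrderBy, MaxResults); the filter string and project value are
// illustrative only.
func exampleListOnePage(ctx context.Context, svc *Service, project string) (*HttpHealthCheckList, error) {
	return svc.HttpHealthChecks.List(project).
		Filter("name != example-check").   // exclude one check by name
		OrderBy("creationTimestamp desc"). // newest first
		MaxResults(100).                   // at most 100 results per page
		Context(ctx).
		Do()
}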
-func (c *GlobalOperationsAggregatedListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53195,7 +60265,7 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -53208,14 +60278,14 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was +// *HttpHealthCheckList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53234,7 +60304,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationAggregatedList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53246,33 +60316,33 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -53284,9 +60354,9 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/aggregated/operations", + // "path": "{project}/global/httpHealthChecks", // "response": { - // "$ref": "OperationAggregatedList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53300,7 +60370,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -53318,214 +60388,111 @@ func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.globalOperations.delete": +// method id "compute.httpHealthChecks.patch": -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Operations resource. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
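// A pagination sketch using the generated Pages helper, which re-issues the
// list request with each nextPageToken until the last page; the Items field is
// assumed from the HttpHealthCheckList type.
func exampleListAllPages(ctx context.Context, svc *Service, project string) ([]*HttpHealthCheck, error) {
	var all []*HttpHealthCheck
	err := svc.HttpHealthChecks.List(project).Pages(ctx, func(page *HttpHealthCheckList) error {
		all = append(all, page.Items...)
		return nil // a non-nil error here would halt the iteration
	})
	return all, err
}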
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { - c.ctx_ = ctx + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified Operations resource. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", - // "parameterOrder": [ - // "project", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalOperations.get": - -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.operation = operation +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *HttpHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. +// Do executes the "compute.httpHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53556,252 +60523,19 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", - // "parameterOrder": [ - // "project", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/operations/{operation}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalOperations.list": - -type GlobalOperationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of Operation resources contained within the -// specified project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.list" call. 
-// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of Operation resources contained within the specified project. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.globalOperations.list", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ - // "project" + // "project", + // "httpHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -53810,194 +60544,132 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, // "response": { - // "$ref": "OperationList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
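// A sketch of the merge-patch call (compute.httpHealthChecks.patch) described
// above: only the fields set on the request body are changed, so sending just
// RequestPath leaves the check's other settings untouched. Field names are
// assumed from the generated HttpHealthCheck type.
func examplePatchRequestPath(ctx context.Context, svc *Service, project, name string) (*Operation, error) {
	return svc.HttpHealthChecks.Patch(project, name, &HttpHealthCheck{
		RequestPath: "/healthz", // the only field to change
	}).Context(ctx).Do()
}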
-func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.aggregatedList": +// method id "compute.httpHealthChecks.update": -type HealthChecksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of all HealthCheck resources, -// regional and global, available to the specified project. (== -// suppress_warning http-rest-shadowed ==) -func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { - c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
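// A sketch of the full-replacement Update call (HTTP PUT), assuming hc holds a
// complete HttpHealthCheck definition; unlike the merge-patch example earlier,
// Update replaces the resource with exactly what is supplied. A caller-provided
// requestId keeps retries safe to repeat, as documented above.
func exampleUpdate(ctx context.Context, svc *Service, project, name, requestID string, hc *HttpHealthCheck) (*Operation, error) {
	return svc.HttpHealthChecks.Update(project, name, hc).
		RequestId(requestID).
		Context(ctx).
		Do()
}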
-func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksAggregatedListCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.aggregatedList" call. -// Exactly one of *HealthChecksAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *HealthChecksAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { +// Do executes the "compute.httpHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54016,7 +60688,7 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthChecksAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54028,95 +60700,65 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal } return ret, nil // { - // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.healthChecks.aggregatedList", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ - // "project" + // "project", + // "httpHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/healthChecks", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, // "response": { - // "$ref": "HealthChecksAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": +// method id "compute.httpsHealthChecks.delete": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. (== -// suppress_warning http-rest-shadowed ==) -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpsHealthCheck resource. 
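The requestId parameter documented on these mutating calls makes retries idempotent: if a request times out and is resent with the same UUID, the server recognizes the duplicate and ignores it. A minimal sketch of how a caller might drive the regenerated Delete builder, assuming the google.golang.org/api/compute/v1 import path, Application Default Credentials, and hypothetical project and resource names:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()

	// NewService relies on Application Default Credentials being configured.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Caller-generated UUID; the zero UUID is rejected, per the requestId docs.
	const requestID = "5f2c1f4e-9c7a-4e33-9d7e-0c5a7d1b2c3d"

	// Retrying this exact call with the same requestID is safe: the server
	// treats the second attempt as a duplicate of the first.
	op, err := svc.HttpsHealthChecks.
		Delete("my-project", "legacy-https-check"). // hypothetical project and resource names
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
}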
+func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.httpsHealthCheck = httpsHealthCheck return c } @@ -54134,7 +60776,7 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -54142,7 +60784,7 @@ func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDelete // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54150,23 +60792,23 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54174,7 +60816,7 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -54182,20 +60824,20 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. +// Do executes the "compute.httpsHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54226,16 +60868,16 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified HttpsHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -54254,7 +60896,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -54266,32 +60908,31 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.get": +// method id "compute.httpsHealthChecks.get": -type HealthChecksGetCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpsHealthCheck resource. Gets a list of +// available HTTPS health checks by making a list() request. +func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54301,7 +60942,7 @@ func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -54309,23 +60950,23 @@ func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { +func (c *HttpsHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54336,7 +60977,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54344,20 +60985,20 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54376,7 +61017,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54388,16 +61029,16 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -54411,9 +61052,9 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54424,24 +61065,23 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } -// method id "compute.healthChecks.insert": +// method id "compute.httpsHealthChecks.insert": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. 
+func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.httpshealthcheck = httpshealthcheck return c } @@ -54459,7 +61099,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -54467,7 +61107,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54475,36 +61115,36 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *HttpsHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -54517,14 +61157,14 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.httpsHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54555,9 +61195,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -54575,9 +61215,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/httpsHealthChecks", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -54590,9 +61230,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.list": +// method id "compute.httpsHealthChecks.list": -type HealthChecksListCall struct { +type HttpsHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -54601,10 +61241,10 @@ type HealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -54613,36 +61253,37 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. 
+// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -54652,22 +61293,23 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
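The List builder's optional parameters documented above (filter, maxResults, orderBy, pageToken) are all set fluently before calling Do. A minimal manual-paging sketch, assuming the google.golang.org/api/compute/v1 import path, Application Default Credentials, and a hypothetical project name; Items and NextPageToken are the usual fields on the generated HttpsHealthCheckList:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials assumed
	if err != nil {
		log.Fatal(err)
	}

	call := svc.HttpsHealthChecks.
		List("my-project").               // hypothetical project
		Filter("name != example-check").  // exclude one resource by name
		MaxResults(100).                  // acceptable values are 0 to 500
		OrderBy("creationTimestamp desc") // newest first

	// Follow nextPageToken by hand; the generated Pages helper wraps this loop.
	pageToken := ""
	for {
		if pageToken != "" {
			call.PageToken(pageToken)
		}
		list, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, hc := range list.Items {
			fmt.Println(hc.Name)
		}
		if list.NextPageToken == "" {
			break
		}
		pageToken = list.NextPageToken
	}
}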
-func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -54675,7 +61317,7 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54685,7 +61327,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -54693,23 +61335,23 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54720,7 +61362,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54733,14 +61375,14 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54759,7 +61401,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54771,33 +61413,33 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -54809,9 +61451,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54825,7 +61467,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
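Pages, whose regenerated implementation follows, wraps exactly that Do / NextPageToken loop and hands each page to a callback. A sketch under the same assumptions (google.golang.org/api/compute/v1 import path, Application Default Credentials, hypothetical project name):

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials assumed
	if err != nil {
		log.Fatal(err)
	}

	// Pages calls Do repeatedly and follows NextPageToken until it is empty;
	// returning a non-nil error from the callback stops the iteration early.
	err = svc.HttpsHealthChecks.List("my-project").Pages(ctx,
		func(page *compute.HttpsHealthCheckList) error {
			for _, hc := range page.Items {
				fmt.Println(hc.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}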
-func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -54843,27 +61485,26 @@ func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckLis } } -// method id "compute.healthChecks.patch": +// method id "compute.httpsHealthChecks.patch": -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. -// (== suppress_warning http-rest-shadowed ==) -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -54881,7 +61522,7 @@ func (r *HealthChecksService) Patch(project string, healthCheck string, healthch // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -54889,7 +61530,7 @@ func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54897,36 +61538,36 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HealthChecksPatchCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -54934,20 +61575,20 @@ func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. +// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54978,16 +61619,16 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55006,9 +61647,9 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -55021,26 +61662,25 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.healthChecks.update": +// method id "compute.httpsHealthChecks.update": -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -55058,7 +61698,7 @@ func (r *HealthChecksService) Update(project string, healthCheck string, healthc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55066,7 +61706,7 @@ func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdate // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55074,36 +61714,36 @@ func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksUpdateCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -55111,20 +61751,20 @@ func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.update" call. +// Do executes the "compute.httpsHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55155,16 +61795,16 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55183,9 +61823,9 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -55198,24 +61838,23 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.httpHealthChecks.delete": +// method id "compute.images.delete": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.image = image return c } @@ -55233,7 +61872,7 @@ func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55241,7 +61880,7 @@ func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55249,23 +61888,23 @@ func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55273,7 +61912,7 @@ func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -55281,20 +61920,20 @@ func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.delete" call. +// Do executes the "compute.images.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55325,16 +61964,16 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified image.", // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", + // "id": "compute.images.delete", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "image" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", + // "image": { + // "description": "Name of the image resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55353,7 +61992,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images/{image}", // "response": { // "$ref": "Operation" // }, @@ -55365,99 +62004,112 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.get": +// method id "compute.images.deprecate": -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Deprecate: Sets the deprecation status of an image. +// +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.image = image + c.deprecationstatus = deprecationstatus + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// Do executes the "compute.images.deprecate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55476,7 +62128,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55488,16 +62140,16 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "image" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", + // "image": { + // "description": "Image name.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55509,123 +62161,120 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images/{image}/deprecate", + // "request": { + // "$ref": "DeprecationStatus" + // }, // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpHealthChecks.insert": +// method id "compute.images.get": -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Gets a list of available images by +// making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksInsertCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55644,7 +62293,7 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55656,128 +62305,67 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "description": "Returns the specified image. Gets a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", // "parameterOrder": [ - // "project" + // "project", + // "image" // ], // "parameters": { + // "image": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", - // "request": { - // "$ref": "HttpHealthCheck" - // }, + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.list": +// method id "compute.images.getFromFamily": -type HttpHealthChecksListCall struct { +type ImagesGetFromFamilyCall struct { s *Service project string + family string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. 
(== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55787,7 +62375,7 @@ func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthCheck // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { c.ifNoneMatch_ = entityTag return c } @@ -55795,23 +62383,23 @@ func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55822,7 +62410,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -55831,18 +62419,19 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55861,7 +62450,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55873,34 +62462,19 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ - // "project" + // "project", + // "family" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "family": { + // "description": "Name of the image family to search for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -55911,9 +62485,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/global/images/family/{family}", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -55924,133 +62498,97 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpHealthChecks.patch": +// method id "compute.images.getIamPolicy": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -// (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
+func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { + c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksPatchCall) Header() http.Header { +func (c *ImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56069,7 +62607,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56081,21 +62619,14 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.images.getIamPolicy", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "resource" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56103,48 +62634,52 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, + // "path": "{project}/global/images/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.update": +// method id "compute.images.insert": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an image in the specified project using the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.image = image + return c +} + +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } @@ -56162,7 +62697,7 @@ func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56170,7 +62705,7 @@ func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56178,57 +62713,56 @@ func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.update" call. +// Do executes the "compute.images.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56259,20 +62793,17 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", + // "description": "Creates an image in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.images.insert", // "parameterOrder": [ - // "project", - // "httpHealthCheck" + // "project" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" + // "forceCreate": { + // "description": "Force image creation if true.", + // "location": "query", + // "type": "boolean" // }, // "project": { // "description": "Project ID for this request.", @@ -56287,117 +62818,183 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "Image" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.httpsHealthChecks.delete": +// method id "compute.images.list": -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. 
(== -// suppress_warning http-rest-shadowed ==) -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
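// NOTE (editorial sketch, not part of the vendored file): the setters above
// (Filter, MaxResults, OrderBy, PageToken) only record query parameters;
// nothing is sent until Do or Pages runs. A minimal caller-side sketch of the
// paging contract described above, using the Pages helper defined for
// ImagesListCall later in this hunk. The function name and the filter value
// are illustrative assumptions; context and fmt are already imported by this
// generated file.
func exampleListImages(ctx context.Context, svc *Service, project string) error {
	call := svc.Images.List(project).
		Filter("name != example-image"). // illustrative filter, mirroring the doc comment's syntax
		MaxResults(100)                  // per-page size; the server default is 500
	// Pages follows nextPageToken automatically until the last page.
	return call.Pages(ctx, func(page *ImageList) error {
		for _, img := range page.Items {
			fmt.Println(img.Name)
		}
		return nil
	})
}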
-func (c *HttpsHealthChecksDeleteCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *ImageList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56416,7 +63013,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56428,19 +63025,34 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "httpMethod": "GET", + // "id": "compute.images.list", // "parameterOrder": [ - // "project", - // "httpsHealthCheck" + // "project" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -56449,117 +63061,126 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/images", // "response": { - // "$ref": "Operation" + // "$ref": "ImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpsHealthChecks.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.images.setIamPolicy": + +type ImagesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { + c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *ImagesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +// Do executes the "compute.images.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56578,7 +63199,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56590,86 +63211,70 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.images.setIamPolicy", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "resource" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/images/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.images.setLabels": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. 
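// NOTE (editorial sketch, not part of the vendored file): a minimal sketch of
// the SetLabels call whose constructor follows below. GlobalSetLabelsRequest
// carries the image's current labelFingerprint (read here via Images.Get) to
// detect conflicting concurrent label updates; the image name and label values
// are illustrative assumptions.
func exampleSetImageLabels(ctx context.Context, svc *Service, project string) error {
	img, err := svc.Images.Get(project, "example-image").Context(ctx).Do()
	if err != nil {
		return err
	}
	req := &GlobalSetLabelsRequest{
		LabelFingerprint: img.LabelFingerprint, // fingerprint of the labels currently on the image
		Labels:           map[string]string{"env": "test"},
	}
	_, err = svc.Images.SetLabels(project, img.Name, req).Context(ctx).Do()
	return err
}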
+func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56677,36 +63282,36 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *ImagesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56714,19 +63319,20 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.images.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56757,11 +63363,12 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.images.setLabels", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { // "project": { @@ -56771,15 +63378,17 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/global/images/{resource}/setLabels", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -56792,157 +63401,90 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.images.testIamPermissions": -type HttpsHealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. (== suppress_warning http-rest-shadowed ==) -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
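// NOTE (editorial sketch, not part of the vendored file): a small sketch of
// the TestIamPermissions call documented above. The permission names passed
// in are illustrative assumptions; the response simply echoes back the subset
// of those permissions the caller actually holds on the image.
func exampleTestImagePermissions(ctx context.Context, svc *Service, project, image string) ([]string, error) {
	req := &TestPermissionsRequest{
		Permissions: []string{"compute.images.get", "compute.images.useReadOnly"},
	}
	resp, err := svc.Images.TestIamPermissions(project, image, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}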
-func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *ImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56961,7 +63503,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56973,47 +63515,35 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.images.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57024,48 +63554,42 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpsHealthChecks.patch": +// method id "compute.instanceGroupManagers.abandonInstances": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -// (== suppress_warning http-rest-shadowed ==) -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Flags the specified instances to be removed from +// the managed instance group. Abandoning an instance does not delete +// the instance, but it does remove the instance from any target pools +// that are applied by the managed instance group. This method reduces +// the targetSize of the managed instance group by the number of +// instances that you abandon. This operation is marked as DONE when the +// action is scheduled even if the instances have not yet been removed +// from the group. You must separately verify the status of the +// abandoning action with the listmanagedinstances method. 
+// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -57083,7 +63607,7 @@ func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57091,7 +63615,7 @@ func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57099,57 +63623,58 @@ func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
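// NOTE (editorial sketch, not part of the vendored file): a minimal sketch of
// the AbandonInstances call described above. Instances are referenced by URL,
// and the returned Operation reports DONE once the abandon is scheduled, not
// once the instance has left the group; the zone, group, and instance URL
// parameters here are illustrative assumptions.
func exampleAbandonInstance(ctx context.Context, svc *Service, project, zone, group, instanceURL string) (*Operation, error) {
	req := &InstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{instanceURL}, // full or partial URL of the instance to abandon
	}
	// Verify the result separately (e.g. via listManagedInstances), as the
	// doc comment above notes.
	return svc.InstanceGroupManagers.AbandonInstances(project, zone, group, req).Context(ctx).Do()
}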
-func (c *HttpsHealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57180,18 +63705,18 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57206,11 +63731,17 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -57223,110 +63754,173 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.update": +// method id "compute.instanceGroupManagers.aggregatedList": -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. 
+func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. 
+func (c *InstanceGroupManagersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
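// NOTE (editorial sketch, not part of the vendored file): a sketch of
// consuming the aggregated list described above. Results come back keyed by
// scope (for example "zones/us-central1-a"); this assumes the generated Pages
// helper for this call (not shown in this hunk) follows nextPageToken the same
// way as the ImagesListCall.Pages helper earlier in the file.
func exampleListAllManagers(ctx context.Context, svc *Service, project string) error {
	call := svc.InstanceGroupManagers.AggregatedList(project)
	return call.Pages(ctx, func(page *InstanceGroupManagerAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, mgr := range scoped.InstanceGroupManagers {
				fmt.Println(scope, mgr.Name)
			}
		}
		return nil
	})
}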
-func (c *HttpsHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57345,7 +63939,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57357,19 +63951,39 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ - // "project", - // "httpsHealthCheck" + // "project" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -57378,72 +63992,71 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/aggregated/instanceGroupManagers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagerAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.delete": - -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Delete: Deletes the specified image. 
(== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.image = image - return c +// method id "compute.instanceGroupManagers.applyUpdatesToInstances": + +type InstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// ApplyUpdatesToInstances: Applies changes to selected instances on the +// managed instance group. This method can be used to apply new +// overrides and/or new versions. +func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57451,52 +64064,58 @@ func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. +// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57527,18 +64146,18 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "description": "Applies changes to selected instances on the managed instance group. 
This method can be used to apply new overrides and/or new versions.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57549,13 +64168,17 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "request": { + // "$ref": "InstanceGroupManagersApplyUpdatesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -57567,28 +64190,31 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.images.deprecate": +// method id "compute.instanceGroupManagers.createInstances": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. -// -// If an empty request body is given, clears the deprecation status -// instead. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateInstances: Creates instances with per-instance configs in this +// managed instance group. Instances are created using the current +// instance template. The create instances operation is marked DONE if +// the createInstances request is successful. The underlying actions +// take additional time. 
You must separately verify the status of the +// creating or actions with the listmanagedinstances method. +func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { + c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerscreateinstancesrequest = instancegroupmanagerscreateinstancesrequest return c } @@ -57601,12 +64227,11 @@ func (r *ImagesService) Deprecate(project string, image string, deprecationstatu // and the request times out. If you make the request again with the // same request ID, the server can check if original operation with the // same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// request. // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { +func (c *InstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57614,7 +64239,7 @@ func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57622,36 +64247,36 @@ func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *InstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersCreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesDeprecateCall) Header() http.Header { +func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerscreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57659,20 +64284,21 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. +// Do executes the "compute.instanceGroupManagers.createInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57703,18 +64329,18 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates instances with per-instance configs in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or actions with the listmanagedinstances method.", // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "id": "compute.instanceGroupManagers.createInstances", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "image": { - // "description": "Image name.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57726,14 +64352,20 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}/deprecate", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", // "request": { - // "$ref": "DeprecationStatus" + // "$ref": "InstanceGroupManagersCreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -57746,98 +64378,107 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.get": +// method id "compute.instanceGroupManagers.delete": -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. 
(== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. +func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesGetCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57856,7 +64497,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57868,18 +64509,18 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.images.get", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57889,113 +64530,151 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getFromFamily": +// method id "compute.instanceGroupManagers.deleteInstances": -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. (== suppress_warning http-rest-shadowed -// ==) -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Flags the specified instances in the managed +// instance group for immediate deletion. The instances are also removed +// from any target pools of which they were a member. This method +// reduces the targetSize of the managed instance group by the number of +// instances that you delete. 
This operation is marked as DONE when the +// action is scheduled even if the instances are still being deleted. +// You must separately verify the status of the deleting action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.family = family + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesGetFromFamilyCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58014,7 +64693,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58026,18 +64705,18 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "family" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -58047,47 +64726,62 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // }, // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getIamPolicy": +// method id "compute.instanceGroupManagers.get": -type ImagesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { - c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. Gets a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58097,7 +64791,7 @@ func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolic // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -58105,23 +64799,23 @@ func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPoli // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58132,7 +64826,7 @@ func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -58140,20 +64834,21 @@ func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58172,7 +64867,7 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58184,14 +64879,21 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.images.getIamPolicy", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58199,17 +64901,16 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Policy" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58220,31 +64921,33 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } -// method id "compute.images.insert": +// method id "compute.instanceGroupManagers.insert": -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. +// +// A managed instance group can have up to 1000 VM instances per group. +// Please contact Cloud Support if you need an increase in this limit. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. 
-func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.zone = zone + c.instancegroupmanager = instancegroupmanager return c } @@ -58262,7 +64965,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58270,7 +64973,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58278,36 +64981,36 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58316,18 +65019,19 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58358,18 +65062,14 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", // "httpMethod": "POST", - // "id": "compute.images.insert", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58381,48 +65081,47 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "request": { - // "$ref": "Image" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.instanceGroupManagers.list": -type ImagesListCall struct { +type InstanceGroupManagersListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of custom images available to the specified -// project. Custom images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -58430,36 +65129,37 @@ func (r *ImagesService) List(project string) *ImagesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. 
You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58469,22 +65169,23 @@ func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
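Editorial usage sketch (not part of the vendored diff): the filter, maxResults, orderBy, and pageToken parameters documented in the doc comments above are normally combined through the generated call builder and the Pages helper rather than tracked by hand. The snippet below is illustrative only; the project, zone, and filter values are placeholders, and svc is assumed to be a *compute.Service constructed elsewhere (for example with compute.NewService).

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listMIGs pages through every managed instance group in one zone,
// applying the optional list parameters described above.
func listMIGs(ctx context.Context, svc *compute.Service) error {
	call := svc.InstanceGroupManagers.List("my-project", "us-central1-a").
		Filter("name != example-mig").    // placeholder filter expression
		MaxResults(100).                  // page size; the server default is 500
		OrderBy("creationTimestamp desc") // newest groups first
	return call.Pages(ctx, func(page *compute.InstanceGroupManagerList) error {
		for _, mig := range page.Items {
			_ = mig.Name // process each InstanceGroupManager here
		}
		return nil
	})
}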
-func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -58492,7 +65193,7 @@ func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58502,7 +65203,7 @@ func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -58510,23 +65211,23 @@ func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesListCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58537,7 +65238,7 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -58546,18 +65247,19 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
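Editorial usage sketch (not part of the vendored diff): the IfNoneMatch and Do comments above describe the conditional-request pattern for GET-style calls. The snippet below shows one plausible way to use it, assuming svc is an existing *compute.Service and that prevETag was taken from the header of an earlier response; everything outside the generated API surface is a placeholder.

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// listIfChanged re-issues the list only if the server-side state differs
// from the ETag observed on a previous response.
func listIfChanged(ctx context.Context, svc *compute.Service, prevETag string) (*compute.InstanceGroupManagerList, error) {
	list, err := svc.InstanceGroupManagers.List("my-project", "us-central1-a").
		IfNoneMatch(prevETag). // fail fast when nothing has changed
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("instance group managers unchanged since last poll")
		return nil, nil
	}
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok {
			log.Printf("compute API error %d: %s", gerr.Code, gerr.Message)
		}
		return nil, err
	}
	return list, nil
}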
-func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58576,7 +65278,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58588,33 +65290,34 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.images.list", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -58624,11 +65327,17 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "ImageList" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58642,7 +65351,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -58660,244 +65369,166 @@ func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) er } } -// method id "compute.images.setIamPolicy": +// method id "compute.instanceGroupManagers.listErrors": -type ImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersListErrorsCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) -func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { - c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListErrors: Lists all errors thrown by actions on instances for a +// given managed instance group. +func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { + c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupManagersListErrorsCall) Filter(filter string) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { - c.ctx_ = ctx +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.images.setIamPolicy", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.setLabels": - -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. 
(== suppress_warning -// http-rest-shadowed ==) -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListErrorsCall) PageToken(pageToken string) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { +func (c *InstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListErrorsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListErrorsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { +func (c *InstanceGroupManagersListErrorsCall) Context(ctx context.Context) *InstanceGroupManagersListErrorsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetLabelsCall) Header() http.Header { +func (c *InstanceGroupManagersListErrorsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.listErrors" call. +// Exactly one of *InstanceGroupManagersListErrorsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *InstanceGroupManagersListErrorsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListErrorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58916,7 +65547,7 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58928,14 +65559,44 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.images.setLabels", + // "description": "Lists all errors thrown by actions on instances for a given managed instance group.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.listErrors", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58943,55 +65604,142 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagersListErrorsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.testIamPermissions": +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListErrorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.listManagedInstances": + +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58999,36 +65747,31 @@ func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
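Editorial usage sketch (not part of the vendored diff): the ListManagedInstances doc comment above notes that each returned managed instance carries a currentAction. The sketch below shows one way a caller might surface instances that are still being created or recreated; svc, the project, the zone, and the group name are placeholders.

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// reportPendingActions prints every instance in the group whose
// currentAction is not NONE (for example CREATING or RECREATING).
func reportPendingActions(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.InstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1-a", "example-mig").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		if mi.CurrentAction != "NONE" {
			fmt.Printf("%s: %s\n", mi.Instance, mi.CurrentAction)
		}
	}
	return nil
}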
-func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59036,20 +65779,23 @@ func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59068,7 +65814,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59080,14 +65826,44 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -59095,20 +65871,16 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59119,42 +65891,53 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } -// method id "compute.instanceGroupManagers.abandonInstances": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.patch": + +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be removed from -// the managed instance group. Abandoning an instance does not delete -// the instance, but it does remove the instance from any target pools -// that are applied by the managed instance group. 
This method reduces -// the targetSize of the managed instance group by the number of -// instances that you abandon. This operation is marked as DONE when the -// action is scheduled even if the instances have not yet been removed -// from the group. You must separately verify the status of the -// abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.instancegroupmanager = instancegroupmanager return c } @@ -59172,7 +65955,7 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59180,7 +65963,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59188,38 +65971,38 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -59232,14 +66015,14 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.instanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59270,9 +66053,9 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ // "project", // "zone", @@ -59280,7 +66063,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" @@ -59298,15 +66081,15 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -59319,158 +66102,125 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": - -type InstanceGroupManagersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.instanceGroupManagers.recreateInstances": -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. // -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59489,7 +66239,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59501,81 +66251,58 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // }, // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instanceGroupManagers.resize": -type InstanceGroupManagersDeleteCall struct { +type InstanceGroupManagersResizeCall struct { s *Service project string zone string @@ -59585,15 +66312,33 @@ type InstanceGroupManagersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// When resizing down, the instance group arbitrarily chooses the order +// in which VMs are deleted. The group takes into account some VM +// attributes when making the selection including: +// +// + The status of the VM instance. + The health of the VM instance. + +// The instance template version the VM is based on. + For regional +// managed instance groups, the location of the VM instance. +// +// This list is subject to change. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -59611,7 +66356,7 @@ func (r *InstanceGroupManagersService) Delete(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59619,7 +66364,7 @@ func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59627,23 +66372,23 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59651,9 +66396,9 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -59666,14 +66411,14 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. +// Do executes the "compute.instanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59704,17 +66449,18 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroupManager", + // "size" // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -59731,6 +66477,13 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -59738,7 +66491,7 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -59747,44 +66500,31 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "https://www.googleapis.com/auth/compute" // ] // } - -} - -// method id "compute.instanceGroupManagers.deleteInstances": - -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + } -// DeleteInstances: Flags the specified instances in the managed -// instance group for immediate deletion. The instances are also removed -// from any target pools of which they were a member. This method -// reduces the targetSize of the managed instance group by the number of -// instances that you delete. 
This operation is marked as DONE when the -// action is scheduled even if the instances are still being deleted. -// You must separately verify the status of the deleting action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.instanceGroupManagers.setInstanceTemplate": + +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } @@ -59802,7 +66542,7 @@ func (r *InstanceGroupManagersService) DeleteInstances(project string, zone stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59810,7 +66550,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59818,36 +66558,36 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59862,14 +66602,14 @@ func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http. return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59900,9 +66640,9 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "zone", @@ -59934,9 +66674,9 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" // }, // "response": { // "$ref": "Operation" @@ -59949,81 +66689,97 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.get": +// method id "compute.instanceGroupManagers.setTargetPools": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. 
+func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -60036,14 +66792,14 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60062,7 +66818,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60074,9 +66830,9 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", // "zone", @@ -60096,6 +66852,11 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -60103,47 +66864,43 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.instanceGroups.addInstances": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. 
-// -// A managed instance group can have up to 1000 VM instances per group. -// Please contact Cloud Support if you need an increase in this limit. -// (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -60161,7 +66918,7 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60169,7 +66926,7 @@ func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60177,36 +66934,36 @@ func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -60214,20 +66971,21 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60258,14 +67016,21 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit. (== suppress_warning http-rest-shadowed ==)", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instanceGroup" // ], // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -60279,15 +67044,15 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceGroupsAddInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -60300,25 +67065,22 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.list": +// method id "compute.instanceGroups.aggregatedList": -type InstanceGroupManagersListCall struct { +type InstanceGroupsAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -60326,36 +67088,50 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. 
You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstanceGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -60365,22 +67141,23 @@ func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGr // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -60388,7 +67165,7 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60398,7 +67175,7 @@ func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -60406,23 +67183,23 @@ func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60433,7 +67210,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -60442,19 +67219,18 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60473,7 +67249,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60485,34 +67261,38 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of instance groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -60522,17 +67302,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/aggregated/instanceGroups", // "response": { - // "$ref": "InstanceGroupManagerList" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60546,7 +67320,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
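// ---------------------------------------------------------------------------
// Editorial sketch (not part of the vendored file): a hedged illustration of
// the filter / pagination behaviour documented above for the aggregated-list
// call, using the Pages helper so nextPageToken handling is automatic.
// Assumes a *compute.Service named svc plus imports of "context", "fmt" and
// "google.golang.org/api/compute/v1"; the project ID and filter expression
// are placeholders.
func aggregatedListSketch(ctx context.Context, svc *compute.Service) error {
	call := svc.InstanceGroups.AggregatedList("my-project").
		Filter("name != example-instance-group"). // example filter expression
		MaxResults(500)
	// Pages invokes the callback once per page until nextPageToken is empty.
	return call.Pages(ctx, func(page *compute.InstanceGroupAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, ig := range scoped.InstanceGroups {
				fmt.Printf("%s: %s (%d instances)\n", scope, ig.Name, ig.Size)
			}
		}
		return nil
	})
}
// ---------------------------------------------------------------------------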
-func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -60564,100 +67338,53 @@ func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*Insta } } -// method id "compute.instanceGroupManagers.listManagedInstances": +// method id "compute.instanceGroups.delete": -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. (== -// suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. +func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.instanceGroup = instanceGroup return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60665,23 +67392,23 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60689,31 +67416,29 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { +// Do executes the "compute.instanceGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60732,7 +67457,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListManagedInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60744,44 +67469,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group to delete.", // "location": "path", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -60789,137 +67491,124 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.patch": - -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.instanceGroups.get": -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. 
(== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { - c.urlParams_.Set("requestId", requestId) +// Get: Returns the specified instance group. Gets a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
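// ---------------------------------------------------------------------------
// Editorial sketch (not part of the vendored file): a hedged example of the
// ETag behaviour described for IfNoneMatch above. When the cached ETag still
// matches, Do returns an error for which googleapi.IsNotModified reports
// true. Assumes a *compute.Service named svc plus imports of "context",
// "google.golang.org/api/compute/v1" and "google.golang.org/api/googleapi";
// the names and the cached ETag are placeholders.
func getIfChangedSketch(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.InstanceGroup, error) {
	ig, err := svc.InstanceGroups.
		Get("my-project", "us-central1-a", "my-group").
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The group has not changed since cachedETag was recorded; keep the
		// previously cached copy.
		return nil, nil
	}
	return ig, err
}
// ---------------------------------------------------------------------------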
-func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *InstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60938,7 +67627,7 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60950,17 +67639,17 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. 
You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instanceGroup": { + // "description": "The name of the instance group.", // "location": "path", // "required": true, // "type": "string" @@ -60972,67 +67661,45 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.instanceGroups.insert": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. 
You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.instancegroup = instancegroup return c } @@ -61050,7 +67717,7 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61058,7 +67725,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61066,36 +67733,36 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
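// ---------------------------------------------------------------------------
// Editorial sketch (not part of the vendored file): a hedged example of the
// requestId idempotency described above for InstanceGroups.Insert. Retrying
// with the same (non-zero) UUID lets the server drop a duplicate request.
// Assumes a *compute.Service named svc plus imports of "context" and
// "google.golang.org/api/compute/v1"; the names and UUID are placeholders,
// and a real caller would generate the UUID rather than hard-code it.
func insertGroupSketch(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	group := &compute.InstanceGroup{Name: "example-unmanaged-group"}
	const requestID = "123e4567-e89b-42d3-a456-426614174000" // placeholder UUID
	return svc.InstanceGroups.
		Insert("my-project", "us-central1-a", group).
		RequestId(requestID).
		Context(ctx).
		Do()
}
// ---------------------------------------------------------------------------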
-func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61103,21 +67770,20 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61148,21 +67814,14 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -61176,15 +67835,15 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where you want to create the instance group.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/zones/{zone}/instanceGroups", // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "InstanceGroup" // }, // "response": { // "$ref": "Operation" @@ -61197,126 +67856,162 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.instanceGroups.list": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// When resizing down, the instance group arbitrarily chooses the order -// in which VMs are deleted. The group takes into account some VM -// attributes when making the selection including: +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// + The status of the VM instance. + The health of the VM instance. + -// The instance template version the VM is based on. + For regional -// managed instance groups, the location of the VM instance. 
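// ---------------------------------------------------------------------------
// Editorial sketch (not part of the vendored file): a hedged example of the
// zonal InstanceGroups.List call introduced in this hunk, combining a filter
// with descending creation-timestamp ordering and the generated Pages
// pagination helper (assumed to exist for this call, as it does for the other
// list calls in this package). Assumes a *compute.Service named svc plus
// imports of "context", "fmt" and "google.golang.org/api/compute/v1"; the
// project, zone and filter expression are placeholders.
func listZonalGroupsSketch(ctx context.Context, svc *compute.Service) error {
	return svc.InstanceGroups.
		List("my-project", "us-central1-a").
		Filter("name != example-instance-group").
		OrderBy("creationTimestamp desc"). // newest groups first
		Pages(ctx, func(page *compute.InstanceGroupList) error {
			for _, ig := range page.Items {
				fmt.Printf("%s\t%s\n", ig.Name, ig.CreationTimestamp)
			}
			return nil
		})
}
// ---------------------------------------------------------------------------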
+// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// This list is subject to change. +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -// (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
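// Editor's note: a hedged usage sketch, not part of the vendored patch above.
// The compute.instanceGroups.list call documented above exposes Filter,
// MaxResults, OrderBy and PageToken; a minimal way to consume it is the
// generated Pages helper (defined later in this same file), which follows
// nextPageToken automatically. Assumed, not shown in this hunk: the
// package-level compute.NewService constructor, Application Default
// Credentials, the Items field on InstanceGroupList, and the placeholder
// project/zone/filter values.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // credentials resolved from the environment
	if err != nil {
		log.Fatal(err)
	}

	// Filter syntax as documented above: field name, comparison operator, value.
	call := svc.InstanceGroups.List("my-project", "us-central1-a").
		Filter("name != example-instance-group"). // placeholder filter
		MaxResults(100)

	// Pages invokes the callback once per page until nextPageToken is empty.
	err = call.Pages(ctx, func(page *compute.InstanceGroupList) error {
		for _, ig := range page.Items {
			fmt.Println(ig.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}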
-func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61335,7 +68030,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61347,109 +68042,177 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager", - // "size" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, - // "type": "integer" + // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. 
The templates for existing -// instances in the group do not change unless you recreate them. (== -// suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61457,36 +68220,36 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61494,21 +68257,21 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } - -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61527,7 +68290,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61539,21 +68302,44 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists the instances in the specified instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", // "location": "path", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -61561,60 +68347,75 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // "$ref": "InstanceGroupsListInstancesRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.setTargetPools": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration before the VM instance is removed or deleted. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest return c } @@ -61632,7 +68433,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61640,7 +68441,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. 
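// Editor's note: a hedged sketch of the listInstances call introduced above;
// it assumes the imports from the previous sketch (context, fmt, compute) and
// a *compute.Service built the same way. The InstanceState request field and
// the Items/Instance/Status response fields are assumptions about the
// generated types in this package; project, zone and group are placeholders.
func listGroupInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupsListInstancesRequest{
		InstanceState: "RUNNING", // assumed enum value; "ALL" would return every member
	}
	return svc.InstanceGroups.
		ListInstances("my-project", "us-central1-a", "my-group", req).
		Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {
			for _, item := range page.Items {
				fmt.Println(item.Instance, item.Status) // instance URL and its state
			}
			return nil
		})
}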
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61648,36 +68449,36 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61685,21 +68486,21 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instanceGroups.removeInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61730,17 +68531,17 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", // "required": true, // "type": "string" @@ -61758,15 +68559,15 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // "$ref": "InstanceGroupsRemoveInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -61779,29 +68580,26 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instanceGroups.setNamedPorts": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. 
(== -// suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest return c } @@ -61819,7 +68617,7 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61827,7 +68625,7 @@ func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61835,36 +68633,36 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
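// Editor's note: a hedged sketch for the setNamedPorts call above. Mutations
// in this service return an Operation and accept the optional requestId
// documented above for idempotent retries. The NamedPorts/Name/Port request
// fields are assumptions about the generated types; the UUID and resource
// names are placeholders, and the Operation is printed rather than polled.
// Imports match the earlier sketches.
func setHTTPNamedPort(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupsSetNamedPortsRequest{
		NamedPorts: []*compute.NamedPort{{Name: "http", Port: 8080}},
	}
	op, err := svc.InstanceGroups.
		SetNamedPorts("my-project", "us-central1-a", "my-group", req).
		RequestId("3c4b7d1e-0b2a-4f6d-9e7a-1b2c3d4e5f60"). // any valid, non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name, op.Status)
	return nil
}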
-func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61879,14 +68677,14 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.instanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61917,9 +68715,9 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the named ports for the specified instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "zone", @@ -61927,7 +68725,7 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", + // "description": "The name of the instance group where the named ports are updated.", // "location": "path", // "required": true, // "type": "string" @@ -61951,9 +68749,9 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "InstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -61966,157 +68764,104 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.instanceTemplates.delete": -type InstanceGroupsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. It is not possible to +// delete templates that are already in use by a managed instance group. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.instanceTemplate = instanceTemplate return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
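// Editor's note: a hedged sketch for the instanceTemplates.delete call above.
// Because deletion is permanent, the requestId parameter documented above is
// the natural way to make a retried delete idempotent; reuse the same UUID
// when retrying the same logical request. Resource names and the UUID are
// placeholders; imports match the earlier sketches.
func deleteTemplate(ctx context.Context, svc *compute.Service) error {
	op, err := svc.InstanceTemplates.
		Delete("my-project", "my-old-template").
		RequestId("0f1e2d3c-4b5a-4678-8796-a5b4c3d2e1f0"). // must not be the zero UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("delete operation started:", op.Name)
	return nil
}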
-func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsAggregatedListCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +// Do executes the "compute.instanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62135,7 +68880,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62147,34 +68892,19 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ - // "project" + // "project", + // "instanceTemplate" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -62183,143 +68913,117 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroups", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.delete": +// method id "compute.instanceTemplates.get": -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. 
Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance template. Gets a list of +// available instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.instanceTemplate = instanceTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
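The conditional-fetch flow enabled by IfNoneMatch, as a hedged sketch: it assumes a *compute.Service constructed as in the earlier sketch and that the server returns an Etag header for this resource, which is not guaranteed; fetchTemplate, project, and name are illustrative names:

    package templates

    import (
        "context"

        compute "google.golang.org/api/compute/v1"
        "google.golang.org/api/googleapi"
    )

    // fetchTemplate re-reads an instance template only if it changed since lastEtag
    // (pass "" on the first call).
    func fetchTemplate(ctx context.Context, svc *compute.Service, project, name, lastEtag string) (*compute.InstanceTemplate, string, error) {
        call := svc.InstanceTemplates.Get(project, name).Context(ctx)
        if lastEtag != "" {
            call = call.IfNoneMatch(lastEtag)
        }
        tmpl, err := call.Do()
        if googleapi.IsNotModified(err) {
            return nil, lastEtag, nil // unchanged; keep the cached copy
        }
        if err != nil {
            return nil, "", err
        }
        // googleapi.ServerResponse is embedded in the returned struct, so the
        // response headers (including any Etag) are reachable directly.
        return tmpl, tmpl.Header.Get("Etag"), nil
    }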
-func (c *InstanceGroupsDeleteCall) Header() http.Header { +func (c *InstanceTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62338,7 +69042,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62350,80 +69054,67 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "description": "Returns the specified instance template. 
Gets a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "instanceTemplate" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", + // "instanceTemplate": { + // "description": "The name of the instance template.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.get": +// method id "compute.instanceTemplates.getIamPolicy": -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceTemplatesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
+func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { + c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62433,7 +69124,7 @@ func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { +func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -62441,23 +69132,23 @@ func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsGetCall) Header() http.Header { +func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62468,7 +69159,7 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62476,21 +69167,20 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// Do executes the "compute.instanceTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62509,7 +69199,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62521,21 +69211,14 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", + // "id": "compute.instanceTemplates.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "resource" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62543,16 +69226,17 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62563,26 +69247,27 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } -// method id "compute.instanceGroups.insert": +// method id "compute.instanceTemplates.insert": -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + 
ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instancegroup = instancegroup + c.instancetemplate = instancetemplate return c } @@ -62600,7 +69285,7 @@ func (r *InstanceGroupsService) Insert(project string, zone string, instancegrou // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62608,7 +69293,7 @@ func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62616,36 +69301,36 @@ func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
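A sketch of creating a template through the InstanceTemplatesService.Insert surface introduced here; the machine type, image family, network, and request ID are illustrative placeholders, and polling the returned Operation to completion is omitted:

    package templates

    import (
        "context"

        compute "google.golang.org/api/compute/v1"
    )

    func createTemplate(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
        tmpl := &compute.InstanceTemplate{
            Name: "example-template",
            Properties: &compute.InstanceProperties{
                MachineType: "e2-small", // templates take a bare machine-type name
                Disks: []*compute.AttachedDisk{{
                    Boot:       true,
                    AutoDelete: true,
                    InitializeParams: &compute.AttachedDiskInitializeParams{
                        SourceImage: "projects/debian-cloud/global/images/family/debian-10",
                    },
                }},
                NetworkInterfaces: []*compute.NetworkInterface{{
                    Network: "global/networks/default",
                }},
            },
        }
        // A client-chosen request ID makes a retried insert idempotent.
        return svc.InstanceTemplates.Insert(project, tmpl).
            RequestId("22222222-3333-4444-5555-666666666666").
            Context(ctx).
            Do()
    }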
-func (c *InstanceGroupsInsertCall) Header() http.Header { +func (c *InstanceTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62654,19 +69339,18 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. +// Do executes the "compute.instanceTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62697,12 +69381,11 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", + // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -62716,17 +69399,11 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the instance group.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/global/instanceTemplates", // "request": { - // "$ref": "InstanceGroup" + // "$ref": "InstanceTemplate" // }, // "response": { // "$ref": "Operation" @@ -62739,25 +69416,23 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.list": +// method id "compute.instanceTemplates.list": -type InstanceGroupsListCall struct { +type InstanceTemplatesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. (== suppress_warning http-rest-shadowed -// ==) -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of instance templates that are contained +// within the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -62765,36 +69440,37 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -62804,22 +69480,23 @@ func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsLis // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
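A sketch of listing templates with the filter, ordering, and paging parameters documented above; the filter expression and page size are illustrative, and the Pages helper (defined further down in this file) handles the nextPageToken bookkeeping:

    package templates

    import (
        "context"
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    func listTemplates(ctx context.Context, svc *compute.Service, project string) error {
        call := svc.InstanceTemplates.List(project).
            Filter("name != example-template").
            OrderBy("creationTimestamp desc").
            MaxResults(100)
        // Pages invokes the callback once per page and follows nextPageToken itself.
        return call.Pages(ctx, func(page *compute.InstanceTemplateList) error {
            for _, t := range page.Items {
                fmt.Printf("%s\t%s\n", t.Name, t.CreationTimestamp)
            }
            return nil
        })
    }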
-func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -62827,7 +69504,7 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62837,7 +69514,7 @@ func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { +func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -62845,23 +69522,23 @@ func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62872,7 +69549,7 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62881,19 +69558,18 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was +// *InstanceTemplateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62912,7 +69588,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62924,34 +69600,33 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of instance templates that are contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -62961,17 +69636,11 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/global/instanceTemplates", // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62985,7 +69654,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -63003,97 +69672,32 @@ func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGrou } } -// method id "compute.instanceGroups.listInstances": +// method id "compute.instanceTemplates.setIamPolicy": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. -// (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { + c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63101,36 +69705,36 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
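The IAM surface added for instance templates is normally used as a read-modify-write; a hedged sketch, where the role and member are placeholders and handling of concurrent policy changes is left out:

    package templates

    import (
        "context"

        compute "google.golang.org/api/compute/v1"
    )

    // grantTemplateAccess appends a binding to an instance template's IAM policy.
    func grantTemplateAccess(ctx context.Context, svc *compute.Service, project, resource, member string) error {
        policy, err := svc.InstanceTemplates.GetIamPolicy(project, resource).Context(ctx).Do()
        if err != nil {
            return err
        }
        policy.Bindings = append(policy.Bindings, &compute.Binding{
            Role:    "roles/compute.instanceAdmin.v1", // placeholder role
            Members: []string{member},                 // e.g. "user:alice@example.com"
        })
        // Sending the fetched policy (with its etag) back lets the server
        // reject writes that raced with another modification.
        _, err = svc.InstanceTemplates.SetIamPolicy(project, resource, &compute.GlobalSetPolicyRequest{
            Policy: policy,
        }).Context(ctx).Do()
        return err
    }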
-func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63138,21 +69742,20 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.instanceTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63171,7 +69774,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63183,44 +69786,14 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "id": "compute.instanceTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63228,76 +69801,205 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { +// method id "compute.instanceTemplates.testIamPermissions": + +type InstanceTemplatesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
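As an aside on the regenerated surface above: a minimal, illustrative sketch of driving the new compute.instanceTemplates.setIamPolicy and testIamPermissions wrappers from this vendored client. The project and template names, the member, and the probed permission are hypothetical placeholders; compute.NewService and the GlobalSetPolicyRequest / TestPermissionsRequest field names are assumed to match the generated google.golang.org/api/compute/v1 package vendored here, not guaranteed by this patch.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor; uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	project, template := "my-project", "my-template" // hypothetical identifiers

	// setIamPolicy replaces the whole policy on the instance template.
	policy, err := svc.InstanceTemplates.SetIamPolicy(project, template, &compute.GlobalSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.instanceAdmin.v1",
				Members: []string{"user:alice@example.com"}, // hypothetical member
			}},
		},
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new policy etag:", policy.Etag)

	// testIamPermissions reports which of the requested permissions the caller holds.
	resp, err := svc.InstanceTemplates.TestIamPermissions(project, template, &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instanceTemplates.get"}, // hypothetical permission to probe
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", resp.Permissions)
}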
+func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() } - if x.NextPageToken == "" { - return nil + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, } - c.PageToken(x.NextPageToken) } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + } -// method id "compute.instanceGroups.removeInstances": +// method id "compute.instances.addAccessConfig": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. (== -// suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddAccessConfig: Adds an access config to an instance's network +// interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -63315,7 +70017,7 @@ func (r *InstanceGroupsService) RemoveInstances(project string, zone string, ins // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63323,7 +70025,7 @@ func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *Instanc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63331,36 +70033,36 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63368,21 +70070,21 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. +// Do executes the "compute.instances.addAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63413,18 +70115,26 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", + // "description": "Adds an access config to an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance", + // "networkInterface" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -63441,15 +70151,16 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -63462,27 +70173,28 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.instanceGroups.setNamedPorts": +// 
method id "compute.instances.addResourcePolicies": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddResourcePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. -// (== suppress_warning http-rest-shadowed ==) -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddResourcePolicies: Adds existing resource policies to an instance. +// You can only add one policy right now which will be applied to this +// instance for scheduling live migrations. +func (r *InstancesService) AddResourcePolicies(project string, zone string, instance string, instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest) *InstancesAddResourcePoliciesCall { + c := &InstancesAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + c.instance = instance + c.instancesaddresourcepoliciesrequest = instancesaddresourcepoliciesrequest return c } @@ -63500,7 +70212,7 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesAddResourcePoliciesCall) RequestId(requestId string) *InstancesAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63508,7 +70220,7 @@ func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63516,36 +70228,36 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesAddResourcePoliciesCall) Context(ctx context.Context) *InstancesAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InstancesAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63553,21 +70265,21 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.instances.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63598,18 +70310,19 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Adds existing resource policies to an instance. 
You can only add one policy right now which will be applied to this instance for scheduling live migrations.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.instances.addResourcePolicies", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -63626,15 +70339,16 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/zones/{zone}/instances/{instance}/addResourcePolicies", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "InstancesAddResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -63647,105 +70361,173 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceTemplates.delete": +// method id "compute.instances.aggregatedList": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. Deleting an instance -// template is permanent and cannot be undone. It is not possible to -// delete templates that are already in use by a managed instance group. -// (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves aggregated list of all of the instances in +// your project across all regions and zones. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstancesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstancesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63764,7 +70546,7 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63776,19 +70558,39 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "instanceTemplate" + // "project" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -63797,118 +70599,159 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/aggregated/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceTemplates.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// ForceAttach sets the optional parameter "forceAttach": Whether to +// force attach the disk even if it's currently attached to another +// instance. +func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { + c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.instances.attachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63927,7 +70770,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63939,16 +70782,22 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. 
For more information, read Adding a persistent disk to your instance.", + // "httpMethod": "POST", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "forceAttach": { + // "description": "Whether to force attach the disk even if it's currently attached to another instance.", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -63960,113 +70809,135 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + // "request": { + // "$ref": "AttachedDisk" + // }, // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.getIamPolicy": +// method id "compute.instances.delete": -type InstanceTemplatesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c } -// GetIamPolicy: Gets the access control policy for a resource. 
May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { - c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
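The aggregatedList wrapper added earlier in this hunk exposes the filter, maxResults, and pageToken options plus the Pages helper, which resets the page token after iterating. A minimal paging sketch follows; the project ID and filter string are hypothetical, and the InstanceAggregatedList.Items / InstancesScopedList field names are assumed from the vendored compute/v1 types.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	call := svc.Instances.AggregatedList("my-project"). // hypothetical project ID
		Filter(`status = "RUNNING"`).                   // hypothetical filter expression
		MaxResults(100)

	// Pages invokes the callback once per page and restores the original
	// pageToken when it returns; a non-nil error from the callback stops iteration.
	err = call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, inst := range scoped.Instances {
				fmt.Printf("%s: %s (%s)\n", scope, inst.Name, inst.Status)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}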
-func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64085,7 +70956,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64097,14 +70968,22 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.getIamPolicy", + // "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64112,49 +70991,53 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.insert": +// method id "compute.instances.deleteAccessConfig": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. 
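// A minimal, self-contained sketch of the instance delete call documented above,
// using an idempotency request ID so a retried request is not executed twice.
// The project, zone, instance and UUID values are placeholders, and credentials
// are assumed to come from Application Default Credentials.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The same requestId can be sent again on retry; the server ignores the
	// duplicate if the original delete already completed.
	op, err := svc.Instances.Delete("my-project", "us-central1-a", "my-instance").
		RequestId("123e4567-e89b-12d3-a456-426614174000").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete started: operation %s is %s", op.Name, op.Status)
}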
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) return c } @@ -64172,7 +71055,7 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -64180,7 +71063,7 @@ func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64188,36 +71071,31 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -64225,19 +71103,21 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.instances.deleteAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64268,13 +71148,36 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes an access config from an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" // ], // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64286,12 +71189,16 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", - // "request": { - // "$ref": "InstanceTemplate" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", // "response": { // "$ref": "Operation" // }, @@ -64303,159 +71210,106 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.instances.detachDisk": -type InstanceTemplatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project. (== suppress_warning http-rest-shadowed -// ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachDisk: Detaches a disk from an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
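// A hedged sketch of the detachDisk call described above; it reuses the service
// setup from the earlier delete sketch (imports context, log and
// google.golang.org/api/compute/v1). The deviceName must match the device name
// reported by a get() on the instance, and all literal names are placeholders.
func detachDataDisk(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.DetachDisk("my-project", "us-central1-a", "my-instance", "data-disk-1").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("detachDisk operation %s is %s", op.Name, op.Status)
	return nil
}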
-func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64474,7 +71328,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplateList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64486,163 +71340,156 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "description": "Detaches a disk from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance", + // "deviceName" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "deviceName": { + // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", // "location": "query", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" + // "instance": { + // "description": "Instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceTemplates.setIamPolicy": +// method id "compute.instances.get": -type InstanceTemplatesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. 
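// A hedged sketch of paging through a zonal instance list with a filter
// expression, following the same Filter/MaxResults/pageToken pattern and Pages
// helper shown above for the removed instance-template list call. It assumes the
// generated Instances.List call and reuses the service setup from the first
// sketch (imports context, log and google.golang.org/api/compute/v1).
func listRunningInstances(ctx context.Context, svc *compute.Service) error {
	return svc.Instances.List("my-project", "us-central1-a").
		Filter(`status = "RUNNING"`).
		MaxResults(100).
		Pages(ctx, func(page *compute.InstanceList) error {
			// Pages follows nextPageToken automatically until the last page.
			for _, inst := range page.Items {
				log.Printf("running instance: %s", inst.Name)
			}
			return nil
		})
}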
(== suppress_warning -// http-rest-shadowed ==) -func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { - c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Instance resource. Gets a list of +// available instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64661,7 +71508,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64673,14 +71520,22 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.setIamPolicy", + // "description": "Returns the specified Instance resource. 
Gets a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64688,113 +71543,134 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "Policy" + // "$ref": "Instance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceTemplates.testIamPermissions": +// method id "compute.instances.getGuestAttributes": + +type InstancesGetGuestAttributesCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetGuestAttributes: Returns the specified guest attributes entry. +func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { + c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// QueryPath sets the optional parameter "queryPath": Specifies the +// guest attributes path to be queried. +func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("queryPath", queryPath) + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// VariableKey sets the optional parameter "variableKey": Specifies the +// key for the guest attributes entry. 
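// A hedged sketch of the ETag handling described for the get call above: a first
// get records the response ETag, and a later get with IfNoneMatch turns an
// unchanged resource into an http.StatusNotModified error that
// googleapi.IsNotModified can detect. It reuses the service setup from the first
// sketch and additionally imports google.golang.org/api/googleapi; the "Etag"
// response header name is an assumption of this sketch.
func getIfChanged(ctx context.Context, svc *compute.Service) (*compute.Instance, error) {
	first, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	etag := first.Header.Get("Etag") // promoted from the embedded googleapi.ServerResponse
	second, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Printf("instance unchanged since last fetch")
		return first, nil
	}
	if err != nil {
		return nil, err
	}
	return second, nil
}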
+func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("variableKey", variableKey) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesGetGuestAttributesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instances.getGuestAttributes" call. +// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *GuestAttributes.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64813,7 +71689,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64825,14 +71701,22 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", + // "description": "Returns the specified guest attributes entry.", + // "httpMethod": "GET", + // "id": "compute.instances.getGuestAttributes", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64840,20 +71724,27 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "queryPath": { + // "description": "Specifies the guest attributes path to be queried.", + // "location": "query", + // "type": "string" + // }, + // "variableKey": { + // "description": "Specifies the key for the guest attributes entry.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "GuestAttributes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64864,94 +71755,80 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id 
"compute.instances.addAccessConfig": +// method id "compute.instances.getIamPolicy": -type InstancesAddAccessConfigCall struct { +type InstancesGetIamPolicyCall struct { s *Service project string zone string - instance string - accessconfig *AccessConfig + resource string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { + c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { +func (c *InstancesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -64959,19 +71836,19 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64990,7 +71867,7 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65002,29 +71879,15 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65032,9 +71895,11 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -65045,109 +71910,65 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, + // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.aggregatedList": +// method id "compute.instances.getSerialPortOutput": -type InstancesAggregatedListCall struct { +type InstancesGetSerialPortOutputCall struct { s *Service project string + zone string + instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves aggregated list of all of the instances in -// your project across all regions and zones. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetSerialPortOutput: Returns the last 1 MB of serial port output from +// the specified instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Start sets the optional parameter "start": Returns output starting +// from a specific byte position. Use this to page through output when +// the output is too large to return in a single request. For the +// initial request, leave this field unspecified. For subsequent calls, +// this field should be set to the next value returned in the previous +// call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65157,7 +71978,7 @@ func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
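// A hedged sketch of paging serial console output with the Start parameter
// described above: each response is assumed to carry the already-returned byte
// count in its Next field, which is fed back as Start on the following call. It
// reuses the service setup from the first sketch (imports context, log and
// google.golang.org/api/compute/v1).
func tailSerialPort(ctx context.Context, svc *compute.Service) error {
	var start int64
	for i := 0; i < 3; i++ { // three polls, purely for illustration
		out, err := svc.Instances.GetSerialPortOutput("my-project", "us-central1-a", "my-instance").
			Port(1).
			Start(start).
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
		if out.Contents != "" {
			log.Print(out.Contents)
		}
		start = out.Next // continue from where the previous response ended
	}
	return nil
}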
-func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { c.ifNoneMatch_ = entityTag return c } @@ -65165,23 +71986,23 @@ func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65192,7 +72013,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -65200,19 +72021,21 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// *SerialPortOutput.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65231,7 +72054,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65243,47 +72066,55 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the last 1 MB of serial port output from the specified instance.", // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", // "location": "query", - // "minimum": "0", + // "maximum": "4", + // "minimum": "1", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "start": { + // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + // "format": "int64", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65294,125 +72125,80 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.attachDisk": +// method id "compute.instances.getShieldedInstanceIdentity": -type InstancesAttachDiskCall struct { +type InstancesGetShieldedInstanceIdentityCall struct { s *Service project string zone string instance string - attacheddisk *AttachedDisk urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. 
(== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity +// of an instance +func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { + c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.attacheddisk = attacheddisk - return c -} - -// ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. -func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { - c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -65425,14 +72211,14 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getShieldedInstanceIdentity" call. +// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65451,7 +72237,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65463,22 +72249,17 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. 
For more information, read Adding a persistent disk to your instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "description": "Returns the Shielded Instance Identity of an instance", + // "httpMethod": "GET", + // "id": "compute.instances.getShieldedInstanceIdentity", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance.", - // "location": "query", - // "type": "boolean" - // }, // "instance": { - // "description": "The instance name for this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -65486,14 +72267,9 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // }, // "project": { // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, // "zone": { @@ -65504,39 +72280,36 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - // "request": { - // "$ref": "AttachedDisk" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", // "response": { - // "$ref": "Operation" + // "$ref": "ShieldedInstanceIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.delete": +// method id "compute.instances.insert": -type InstancesDeleteCall struct { +type InstancesInsertCall struct { s *Service project string zone string - instance string + instance *Instance urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. 
(== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance @@ -65557,15 +72330,30 @@ func (r *InstancesService) Delete(project string, zone string, instance string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. +// +// This field is optional. It can be a full or partial URL. For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate +// - projects/project/global/instanceTemplates/instanceTemplate +// - global/instanceTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65573,53 +72361,57 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesDeleteCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. +// Do executes the "compute.instances.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65650,22 +72442,14 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "zone" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65678,6 +72462,11 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "location": "query", // "type": "string" // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. 
For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -65686,7 +72475,10 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instances", + // "request": { + // "$ref": "Instance" + // }, // "response": { // "$ref": "Operation" // }, @@ -65698,108 +72490,163 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.instances.list": -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. 
// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *InstanceList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65818,7 +72665,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65830,34 +72677,35 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "description": "Retrieves the list of instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "zone" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", - // "required": true, // "type": "string" // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface.", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", - // "required": true, // "type": "string" // }, // "project": { @@ -65867,11 +72715,6 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -65880,99 +72723,180 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/zones/{zone}/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.detachDisk": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.listReferrers": + +type InstancesListReferrersCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListReferrers: Retrieves the list of referrers to instances contained +// within the specified zone. For more information, read Viewing +// Referrers to VM Instances. 
+func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("deviceName", deviceName) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -65985,14 +72909,14 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceListReferrers.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66011,7 +72935,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66023,29 +72947,45 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + // "httpMethod": "GET", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "deviceName" + // "instance" // ], // "parameters": { - // "deviceName": { - // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", - // "required": true, // "type": "string" // }, // "instance": { - // "description": "Instance name for this request.", + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -66053,11 +72993,6 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -66066,94 +73001,125 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instances.removeResourcePolicies": + +type InstancesRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveResourcePolicies: Removes resource policies from an instance. +func (r *InstancesService) RemoveResourcePolicies(project string, zone string, instance string, instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest) *InstancesRemoveResourcePoliciesCall { + c := &InstancesRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.instancesremoveresourcepoliciesrequest = instancesremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesRemoveResourcePoliciesCall) RequestId(requestId string) *InstancesRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *InstancesRemoveResourcePoliciesCall) Context(ctx context.Context) *InstancesRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetCall) Header() http.Header { +func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -66166,14 +73132,14 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. 
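// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored patch above.
// It shows how the InstancesListReferrersCall added in this hunk is typically
// driven through its Pages helper. compute.NewService, the ListReferrers
// constructor, and the Items/Referrer fields of InstanceListReferrers are
// assumed from the rest of the generated compute/v1 package (they are not in
// this hunk); the project and zone names are placeholders.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Uses Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "-" spans all instances in the zone, per the instance parameter docs above.
	call := svc.Instances.ListReferrers("my-project", "us-central1-a", "-")

	// Pages follows nextPageToken automatically and stops when f returns an error.
	err = call.Pages(ctx, func(page *compute.InstanceListReferrers) error {
		for _, ref := range page.Items {
			fmt.Println(ref.Referrer)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}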
-// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.removeResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66192,7 +73158,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66204,9 +73170,9 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.get", + // "description": "Removes resource policies from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.removeResourcePolicies", // "parameterOrder": [ // "project", // "zone", @@ -66214,7 +73180,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to return.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -66227,6 +73193,11 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -66235,107 +73206,102 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", + // "request": { + // "$ref": "InstancesRemoveResourcePoliciesRequest" + // }, // "response": { - // "$ref": "Instance" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getGuestAttributes": +// method id "compute.instances.reset": -type InstancesGetGuestAttributesCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetGuestAttributes: Returns the specified guest attributes entry. (== -// suppress_warning http-rest-shadowed ==) -func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { - c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Reset: Performs a reset on the instance. This is a hard reset the VM +// does not do a graceful shutdown. For more information, see Resetting +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance return c } -// QueryPath sets the optional parameter "queryPath": Specifies the -// guest attributes path to be queried. -func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("queryPath", queryPath) - return c -} - -// VariableKey sets the optional parameter "variableKey": Specifies the -// key for the guest attributes entry. -func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("variableKey", variableKey) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetGuestAttributesCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -66348,14 +73314,14 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getGuestAttributes" call. -// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *GuestAttributes.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66374,7 +73340,7 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &GuestAttributes{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66386,9 +73352,9 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } return ret, nil // { - // "description": "Returns the specified guest attributes entry. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.getGuestAttributes", + // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", // "zone", @@ -66409,13 +73375,8 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue // "required": true, // "type": "string" // }, - // "queryPath": { - // "description": "Specifies the guest attributes path to be queried.", - // "location": "query", - // "type": "string" - // }, - // "variableKey": { - // "description": "Specifies the key for the guest attributes entry.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, @@ -66427,94 +73388,103 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "path": "{project}/zones/{zone}/instances/{instance}/reset", // "response": { - // "$ref": "GuestAttributes" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getIamPolicy": +// method id "compute.instances.setDeletionProtection": -type InstancesGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { - c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.resource = resource return c } +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
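// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored patch above.
// It exercises the requestId idempotency pattern documented for
// compute.instances.reset: the same UUID is reused on every retry so the
// server can discard duplicates. github.com/google/uuid is an assumed helper
// dependency, and Operation.Name/Status come from the wider compute/v1
// schema; project, zone, and instance names are placeholders.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// One request ID per logical reset, shared across retries.
	reqID := uuid.New().String()

	var op *compute.Operation
	for attempt := 1; attempt <= 3; attempt++ {
		op, err = svc.Instances.
			Reset("my-project", "us-central1-a", "my-instance").
			RequestId(reqID).
			Context(ctx).
			Do()
		if err == nil {
			break
		}
		log.Printf("reset attempt %d failed: %v", attempt, err)
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("reset operation %s is %s", op.Name, op.Status)
}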
-func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetIamPolicyCall) Header() http.Header { +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -66527,14 +73497,14 @@ func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setDeletionProtection" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66553,7 +73523,7 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66565,15 +73535,21 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.getIamPolicy", + // "description": "Sets deletion protection on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ // "project", // "zone", // "resource" // ], // "parameters": { + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -66581,6 +73557,11 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", @@ -66596,112 +73577,100 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.instances.setDiskAutoDelete": -type InstancesGetSerialPortOutputCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetSerialPortOutput: Returns the last 1 MB of serial port output from -// the specified instance. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) return c } -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c -} - -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
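// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored patch above.
// It clears deletion protection with the setDeletionProtection call shown in
// this hunk. Because the deletionProtection query parameter defaults to true,
// it has to be set to false explicitly. Operation fields come from the wider
// compute/v1 schema; project, zone, and instance names are placeholders.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.Instances.
		SetDeletionProtection("my-project", "us-central1-a", "my-instance").
		DeletionProtection(false). // default is true, so pass false explicitly
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setDeletionProtection operation %s is %s", op.Name, op.Status)
}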
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -66714,14 +73683,14 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +// Do executes the "compute.instances.setDiskAutoDelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66740,7 +73709,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66752,31 +73721,37 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the last 1 MB of serial port output from the specified instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "autoDelete", + // "deviceName" // ], // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify. 
Make a get() request on the instance to view currently attached disks and device names.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -66784,9 +73759,8 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, @@ -66798,93 +73772,85 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getShieldedInstanceIdentity": +// method id "compute.instances.setIamPolicy": -type InstancesGetShieldedInstanceIdentityCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity -// of an instance (== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { - c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { + c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { +func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
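// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored patch above.
// It flips the auto-delete flag of an attached disk via the setDiskAutoDelete
// call shown in this hunk; autoDelete and deviceName are required positional
// arguments of the generated constructor, and deviceName must match a device
// name reported by instances.get. All names here are placeholders.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.Instances.
		SetDiskAutoDelete("my-project", "us-central1-a", "my-instance",
			true,                // autoDelete: remove the disk when the instance is deleted
			"persistent-disk-1", // deviceName of the attached disk (placeholder)
		).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setDiskAutoDelete operation %s is %s", op.Name, op.Status)
}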
-func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { +func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { +func (c *InstancesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -66892,19 +73858,19 @@ func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http. googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getShieldedInstanceIdentity" call. -// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { +// Do executes the "compute.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66923,7 +73889,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ShieldedInstanceIdentity{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66935,26 +73901,26 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Returns the Shielded Instance Identity of an instance (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.getShieldedInstanceIdentity", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name or id of the instance scoping this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -66966,40 +73932,42 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", + // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "ShieldedInstanceIdentity" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.insert": +// method id "compute.instances.setLabels": -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. 
(== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. +func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest return c } @@ -67017,30 +73985,15 @@ func (r *InstancesService) Insert(project string, zone string, instance *Instanc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceInstanceTemplate sets the optional parameter -// "sourceInstanceTemplate": Specifies instance template to create the -// instance. -// -// This field is optional. It can be a full or partial URL. For example, -// the following are all valid URLs to an instance template: -// - -// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate -// - projects/project/global/instanceTemplates/instanceTemplate -// - global/instanceTemplates/instanceTemplate -func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { - c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67048,36 +74001,36 @@ func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
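// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored patch above.
// Typical setLabels flow for the call added in this hunk: read the instance
// to obtain the current label fingerprint, then send the new label map with
// that fingerprint. The Get call and the Labels/LabelFingerprint fields of
// Instance and InstancesSetLabelsRequest are assumed from the wider
// compute/v1 package; names are placeholders.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	const project, zone, instance = "my-project", "us-central1-a", "my-instance"

	inst, err := svc.Instances.Get(project, zone, instance).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.InstancesSetLabelsRequest{
		Labels:           map[string]string{"env": "dev", "owner": "storage-team"},
		LabelFingerprint: inst.LabelFingerprint, // guards against concurrent label edits
	}

	op, err := svc.Instances.SetLabels(project, zone, instance, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setLabels operation %s is %s", op.Name, op.Status)
}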
-func (c *InstancesInsertCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67085,20 +74038,21 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. +// Do executes the "compute.instances.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67129,14 +74083,22 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "id": "compute.instances.setLabels", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67149,11 +74111,6 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "location": "query", // "type": "string" // }, - // "sourceInstanceTemplate": { - // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -67162,9 +74119,9 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", // "request": { - // "$ref": "Instance" + // "$ref": "InstancesSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -67177,161 +74134,112 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.list": +// method id "compute.instances.setMachineResources": -type InstancesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. +func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. 
Any non-2xx +// Do executes the "compute.instances.setMachineResources" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67350,7 +74258,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67362,35 +74270,20 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.list", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -67400,6 +74293,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -67408,179 +74306,107 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "request": { + // "$ref": "InstancesSetMachineResourcesRequest" + // }, // "response": { - // "$ref": "InstanceList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.listReferrers": +// method id "compute.instances.setMachineType": -type InstancesListReferrersCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. For more information, read Viewing -// Referrers to VM Instances. (== suppress_warning http-rest-shadowed -// ==) -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListReferrersCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -67593,14 +74419,14 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +// Do executes the "compute.instances.setMachineType" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67619,7 +74445,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceListReferrers{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67631,45 +74457,22 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67677,6 +74480,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -67685,61 +74493,43 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + // "request": { + // "$ref": "InstancesSetMachineTypeRequest" + // }, // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": +// method id "compute.instances.setMetadata": -type InstancesResetCall struct { +type InstancesSetMetadataCall struct { s *Service project string zone string instance string + metadata *Metadata urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Reset: Performs a reset on the instance. This is a hard reset the VM -// does not do a graceful shutdown. For more information, see Resetting -// an instance. 
(== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.metadata = metadata return c } @@ -67757,7 +74547,7 @@ func (r *InstancesService) Reset(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67765,7 +74555,7 @@ func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67773,31 +74563,36 @@ func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResetCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67812,14 +74607,14 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. +// Do executes the "compute.instances.setMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67850,9 +74645,9 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets metadata for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ // "project", // "zone", @@ -67886,7 +74681,10 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { // "$ref": "Operation" // }, @@ -67898,32 +74696,29 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.setDeletionProtection": +// method id "compute.instances.setMinCpuPlatform": -type InstancesSetDeletionProtectionCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDeletionProtection: Sets deletion protection on the instance. (== -// suppress_warning http-rest-shadowed ==) -func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { - c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. 
+func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - return c -} - -// DeletionProtection sets the optional parameter "deletionProtection": -// Whether the resource should be protected against deletion. -func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { - c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } @@ -67941,7 +74736,7 @@ func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtecti // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67949,7 +74744,7 @@ func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *Instan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67957,31 +74752,36 @@ func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *Insta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetDeletionProtectionCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67991,19 +74791,19 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDeletionProtection" call. +// Do executes the "compute.instances.setMinCpuPlatform" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68034,20 +74834,21 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets deletion protection on the instance. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", // "httpMethod": "POST", - // "id": "compute.instances.setDeletionProtection", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { - // "deletionProtection": { - // "default": "true", - // "description": "Whether the resource should be protected against deletion.", - // "location": "query", - // "type": "boolean" + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -68061,13 +74862,6 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -68076,7 +74870,10 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "request": { + // "$ref": "InstancesSetMinCpuPlatformRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -68088,28 +74885,27 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.instances.setScheduling": -type InstancesSetDiskAutoDeleteCall struct { +type InstancesSetSchedulingCall struct { s *Service project string zone string instance string + scheduling *Scheduling urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) + c.scheduling = scheduling return c } @@ -68127,7 +74923,7 @@ func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68135,7 +74931,7 @@ func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68143,31 +74939,36 @@ func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68182,14 +74983,14 @@ func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68220,32 +75021,17 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets an instance's scheduling options.", // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "instance" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, // "instance": { - // "description": "The instance name for this request.", + // "description": "Instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68271,7 +75057,10 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, // "response": { // "$ref": "Operation" // }, @@ -68283,35 +75072,54 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setIamPolicy": +// method id "compute.instances.setServiceAccount": -type InstancesSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { - c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. 
+func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68319,36 +75127,36 @@ func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetIamPolicyCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68358,19 +75166,19 @@ func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setServiceAccount" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68389,7 +75197,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68401,15 +75209,22 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setIamPolicy", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -68417,11 +75232,9 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -68432,12 +75245,12 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "InstancesSetServiceAccountRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -68447,28 +75260,29 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } -// method id "compute.instances.setLabels": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. 
You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. +func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy return c } @@ -68486,7 +75300,7 @@ func (r *InstancesService) SetLabels(project string, zone string, instance strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68494,7 +75308,7 @@ func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabels // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68502,38 +75316,38 @@ func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
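A similar sketch for the PATCH-based SetShieldedInstanceIntegrityPolicy builder; the project, zone, and instance names below are illustrative placeholders, not values taken from this patch:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy := &compute.ShieldedInstanceIntegrityPolicy{
		// Ask the running Shielded VM to re-learn its integrity baseline
		// from the current boot measurements.
		UpdateAutoLearnPolicy: true,
	}
	op, err := svc.Instances.
		SetShieldedInstanceIntegrityPolicy("example-project", "us-central1-a", "example-instance", policy).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("PATCH issued, operation %s: %s", op.Name, op.Status)
}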
-func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -68546,14 +75360,14 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68584,9 +75398,9 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", // "zone", @@ -68594,7 +75408,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68620,9 +75434,9 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", // "request": { - // "$ref": "InstancesSetLabelsRequest" + // "$ref": "ShieldedInstanceIntegrityPolicy" // }, // "response": { // "$ref": "Operation" @@ -68635,28 +75449,28 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.instances.setMachineResources": +// method id "compute.instances.setTags": -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. (== -// suppress_warning http-rest-shadowed ==) -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTags: Sets network tags for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest + c.tags = tags return c } @@ -68674,7 +75488,7 @@ func (r *InstancesService) SetMachineResources(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68682,7 +75496,7 @@ func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68690,36 +75504,36 @@ func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68734,14 +75548,14 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. +// Do executes the "compute.instances.setTags" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68772,9 +75586,9 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Sets network tags for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "id": "compute.instances.setTags", // "parameterOrder": [ // "project", // "zone", @@ -68808,9 +75622,9 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" + // "$ref": "Tags" // }, // "response": { // "$ref": "Operation" @@ -68823,28 +75637,179 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.setMachineType": +// method id "compute.instances.simulateMaintenanceEvent": -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SimulateMaintenanceEvent: Simulates a maintenance event on the +// instance. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
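SetTags replaces the full tag set and expects the instance's current tag fingerprint, so a plausible calling pattern (placeholder names, default credentials assumed) is read-then-write:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, name := "example-project", "us-central1-a", "example-instance" // placeholders

	// Read the instance first: setTags needs the current tag fingerprint
	// as an optimistic-locking token.
	inst, err := svc.Instances.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	tags := &compute.Tags{
		Items:       []string{"http-server", "https-server"},
		Fingerprint: inst.Tags.Fingerprint, // inst.Tags is expected to be non-nil, but check in real code
	}
	op, err := svc.Instances.SetTags(project, zone, name, tags).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s: %s", op.Name, op.Status)
}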
+func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Simulates a maintenance event on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id 
"compute.instances.start": + +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Start: Starts an instance that was stopped using the instances().stop +// method. For more information, see Restart an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest return c } @@ -68862,7 +75827,7 @@ func (r *InstancesService) SetMachineType(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68870,7 +75835,7 @@ func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetM // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68878,36 +75843,31 @@ func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68922,14 +75882,14 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. +// Do executes the "compute.instances.start" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68960,9 +75920,9 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "id": "compute.instances.start", // "parameterOrder": [ // "project", // "zone", @@ -68970,7 +75930,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68996,10 +75956,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - // "request": { - // "$ref": "InstancesSetMachineTypeRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { // "$ref": "Operation" // }, @@ -69011,28 +75968,28 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instances.setMetadata": +// method id "compute.instances.startWithEncryptionKey": -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. 
(== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// instances().stop method. For more information, see Restart an +// instance. +func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.metadata = metadata + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } @@ -69050,7 +76007,7 @@ func (r *InstancesService) SetMetadata(project string, zone string, instance str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69058,7 +76015,7 @@ func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMeta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69066,36 +76023,36 @@ func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69110,14 +76067,14 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. +// Do executes the "compute.instances.startWithEncryptionKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69148,9 +76105,9 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", // "zone", @@ -69158,7 +76115,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69184,9 +76141,9 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "Metadata" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -69199,29 +76156,30 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.setMinCpuPlatform": +// method id "compute.instances.stop": -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. (== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } @@ -69239,7 +76197,7 @@ func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
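Since Stop and Start both return a zonal Operation rather than blocking, a caller would typically poll the operation before acting on the result; a sketch under the same placeholder assumptions:

package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, name := "example-project", "us-central1-a", "example-instance" // placeholders

	op, err := svc.Instances.Stop(project, zone, name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Stop returns a zonal Operation; poll it until DONE before relying on the result.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do(); err != nil {
			log.Fatal(err)
		}
	}
	if op.Error != nil {
		log.Fatalf("stop failed: %+v", op.Error.Errors)
	}
	// The stopped instance can later be restarted with the body-less Start call.
	if _, err := svc.Instances.Start(project, zone, name).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}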
-func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69247,7 +76205,7 @@ func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69255,36 +76213,31 @@ func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69299,14 +76252,14 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. +// Do executes the "compute.instances.stop" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69337,9 +76290,9 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform. (== suppress_warning http-rest-shadowed ==)", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", // "zone", @@ -69347,7 +76300,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to stop.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69373,10 +76326,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", - // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { // "$ref": "Operation" // }, @@ -69388,54 +76338,34 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setScheduling": +// method id "compute.instances.testIamPermissions": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.scheduling = scheduling - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69443,36 +76373,36 @@ func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69482,19 +76412,19 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69513,7 +76443,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69525,22 +76455,15 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69548,9 +76471,11 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -69561,43 +76486,77 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", // "request": { - // "$ref": "Scheduling" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setServiceAccount": +// method id "compute.instances.update": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateCall struct { + s *Service + project string + zone string + instance string + instance2 *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. (== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates an instance only if the necessary resources are +// available. This method can update only a specific set of instance +// properties. See Updating a running instance for a list of updatable +// instance properties. +func (r *InstancesService) Update(project string, zone string, instance string, instance2 *Instance) *InstancesUpdateCall { + c := &InstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + c.instance2 = instance2 + return c +} + +// MinimalAction sets the optional parameter "minimalAction": Specifies +// the action to take when updating an instance even if the updated +// properties do not require it. If not specified, then Compute Engine +// acts based on the minimum action that the updated properties require. +// +// Possible values: +// "INVALID" +// "NO_EFFECT" +// "REFRESH" +// "RESTART" +func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpdateCall { + c.urlParams_.Set("minimalAction", minimalAction) + return c +} + +// MostDisruptiveAllowedAction sets the optional parameter +// "mostDisruptiveAllowedAction": Specifies the most disruptive action +// that can be taken on the instance as part of the update. 
Compute +// Engine returns an error if the instance properties require a more +// disruptive action as part of the instance update. Valid options from +// lowest to highest are NO_EFFECT, REFRESH, and RESTART. +// +// Possible values: +// "INVALID" +// "NO_EFFECT" +// "REFRESH" +// "RESTART" +func (c *InstancesUpdateCall) MostDisruptiveAllowedAction(mostDisruptiveAllowedAction string) *InstancesUpdateCall { + c.urlParams_.Set("mostDisruptiveAllowedAction", mostDisruptiveAllowedAction) return c } @@ -69615,7 +76574,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { +func (c *InstancesUpdateCall) RequestId(requestId string) *InstancesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69623,7 +76582,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *InstancesUpdateCall) Fields(s ...googleapi.Field) *InstancesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69631,38 +76590,38 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *InstancesUpdateCall) Context(ctx context.Context) *InstancesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *InstancesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -69675,14 +76634,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. +// Do executes the "compute.instances.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69713,9 +76672,9 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Updates an instance only if the necessary resources are available. This method can update only a specific set of instance properties. See Updating a running instance for a list of updatable instance properties.", + // "httpMethod": "PUT", + // "id": "compute.instances.update", // "parameterOrder": [ // "project", // "zone", @@ -69723,12 +76682,46 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to start.", + // "description": "Name of the instance resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "minimalAction": { + // "description": "Specifies the action to take when updating an instance even if the updated properties do not require it. If not specified, then Compute Engine acts based on the minimum action that the updated properties require.", + // "enum": [ + // "INVALID", + // "NO_EFFECT", + // "REFRESH", + // "RESTART" + // ], + // "enumDescriptions": [ + // "", + // "", + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "mostDisruptiveAllowedAction": { + // "description": "Specifies the most disruptive action that can be taken on the instance as part of the update. Compute Engine returns an error if the instance properties require a more disruptive action as part of the instance update. 
Valid options from lowest to highest are NO_EFFECT, REFRESH, and RESTART.", + // "enum": [ + // "INVALID", + // "NO_EFFECT", + // "REFRESH", + // "RESTART" + // ], + // "enumDescriptions": [ + // "", + // "", + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69749,9 +76742,9 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "path": "{project}/zones/{zone}/instances/{instance}", // "request": { - // "$ref": "InstancesSetServiceAccountRequest" + // "$ref": "Instance" // }, // "response": { // "$ref": "Operation" @@ -69764,30 +76757,30 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setShieldedInstanceIntegrityPolicy": +// method id "compute.instances.updateAccessConfig": -type InstancesSetShieldedInstanceIntegrityPolicyCall struct { - s *Service - project string - zone string - instance string - shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance -// integrity policy for an instance. You can only use this method on a -// running instance. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { - c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -69805,7 +76798,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
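// Illustrative sketch of the compute.instances.update call generated above:
// read the instance, modify a field, and PUT it back with MinimalAction and
// RequestId set. Project, zone and instance names are placeholders, and
// Application Default Credentials are assumed to be available.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the current instance so the update body is complete.
	inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	inst.Description = "updated via compute.instances.update"

	// MinimalAction and RequestId map to the query parameters documented above;
	// any non-zero UUID works as a request id.
	op, err := svc.Instances.Update("my-project", "us-central1-a", "my-instance", inst).
		MinimalAction("NO_EFFECT").
		RequestId("11111111-2222-4333-8444-555555555555").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update operation %s is %s", op.Name, op.Status)
}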
-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69813,7 +76806,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69821,38 +76814,38 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { +func (c *InstancesUpdateAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -69865,14 +76858,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. +// Do executes the "compute.instances.updateAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69903,22 +76896,29 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "POST", + // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { - // "description": "Name or id of the instance scoping this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69939,9 +76939,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", // "request": { - // "$ref": "ShieldedInstanceIntegrityPolicy" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -69954,28 +76954,29 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } -// method id "compute.instances.setTags": +// method id "compute.instances.updateDisplayDevice": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateDisplayDeviceCall struct { + s *Service + project string + zone string + instance string + displaydevice *DisplayDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateDisplayDevice: Updates the Display config for a VM instance. 
+// You can only use this method on a stopped VM instance. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { + c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.tags = tags + c.displaydevice = displaydevice return c } @@ -69993,7 +76994,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70001,7 +77002,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70009,38 +77010,38 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -70053,14 +77054,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.instances.updateDisplayDevice" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70091,9 +77092,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateDisplayDevice", // "parameterOrder": [ // "project", // "zone", @@ -70103,7 +77104,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "instance": { // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -70127,9 +77128,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", // "request": { - // "$ref": "Tags" + // "$ref": "DisplayDevice" // }, // "response": { // "$ref": "Operation" @@ -70142,32 +77143,54 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.instances.updateNetworkInterface": -type InstancesSimulateMaintenanceEventCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SimulateMaintenanceEvent: Simulates a maintenance event on the -// instance. 
(== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateNetworkInterface: Updates an instance's network interface. This +// method follows PATCH semantics. +func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70175,33 +77198,38 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
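// Illustrative sketch of the compute.instances.updateNetworkInterface call
// generated above (PATCH semantics): read the interface, append an alias IP
// range, and send it back. svc is assumed to be a *compute.Service built as
// in the earlier sketch; all resource names and CIDR ranges are placeholders.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func addAliasRange(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	nic := inst.NetworkInterfaces[0]

	// The networkInterface query parameter names the interface being patched,
	// while the request body carries the updated NetworkInterface resource.
	nic.AliasIpRanges = append(nic.AliasIpRanges, &compute.AliasIpRange{
		IpCidrRange: "10.128.1.0/24",
	})
	return svc.Instances.UpdateNetworkInterface(
		"my-project", "us-central1-a", "my-instance", nic.Name, nic).
		Context(ctx).Do()
}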
-func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -70214,14 +77242,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Do executes the "compute.instances.updateNetworkInterface" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70252,22 +77280,29 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a maintenance event on the instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", + // "description": "Updates an instance's network interface. 
This method follows PATCH semantics.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface to update.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70275,6 +77310,11 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -70283,7 +77323,10 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "request": { + // "$ref": "NetworkInterface" + // }, // "response": { // "$ref": "Operation" // }, @@ -70295,27 +77338,29 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } -// method id "compute.instances.start": +// method id "compute.instances.updateShieldedInstanceConfig": -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedInstanceConfigCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceconfig *ShieldedInstanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateShieldedInstanceConfig: Updates the Shielded Instance config +// for an instance. You can only use this method on a stopped instance. 
+// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { + c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.shieldedinstanceconfig = shieldedinstanceconfig return c } @@ -70333,7 +77378,7 @@ func (r *InstancesService) Start(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70341,7 +77386,7 @@ func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70349,33 +77394,38 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -70388,14 +77438,14 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. +// Do executes the "compute.instances.updateShieldedInstanceConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70426,9 +77476,9 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.start", + // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedInstanceConfig", // "parameterOrder": [ // "project", // "zone", @@ -70436,7 +77486,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to start.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -70462,7 +77512,10 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", + // "request": { + // "$ref": "ShieldedInstanceConfig" + // }, // "response": { // "$ref": "Operation" // }, @@ -70474,113 +77527,173 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.interconnectAttachments.aggregatedList": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// instances().stop method. For more information, see Restart an -// instance. 
(== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. +func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. 
For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
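// Illustrative sketch of the compute.instances.updateShieldedInstanceConfig
// call generated above (PATCH semantics; the instance must be stopped). svc
// is assumed to be a *compute.Service; resource names are placeholders.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func enableSecureBoot(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	cfg := &compute.ShieldedInstanceConfig{
		EnableSecureBoot: true,
		// ForceSendFields is how the generated structs emit boolean fields
		// explicitly; it matters when a field must be sent as false.
		ForceSendFields: []string{"EnableSecureBoot"},
	}
	return svc.Instances.UpdateShieldedInstanceConfig(
		"my-project", "us-central1-a", "my-instance", cfg).
		Context(ctx).Do()
}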
-func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70599,7 +77712,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70611,82 +77724,101 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" - // }, + // "path": "{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.stop": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
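// Illustrative sketch of the compute.interconnectAttachments.aggregatedList
// call and its Pages helper generated above. svc is assumed to be a
// *compute.Service; the project id and the filter expression are placeholders
// following the filter syntax documented in the method's comments.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listAttachments(ctx context.Context, svc *compute.Service) error {
	call := svc.InterconnectAttachments.AggregatedList("my-project").
		Filter("name != example-attachment").
		MaxResults(100)

	// Pages follows nextPageToken for us and stops when the iteration
	// callback returns an error or the last page is reached.
	return call.Pages(ctx, func(page *compute.InterconnectAttachmentAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, att := range scoped.InterconnectAttachments {
				fmt.Printf("%s: %s\n", scope, att.Name)
			}
		}
		return nil
	})
}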
+func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnectAttachments.delete": + +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur VM usage charges while they are stopped. However, resources -// that the VM is using, such as persistent disks and static IP -// addresses, will continue to be charged until they are deleted. For -// more information, see Stopping an instance. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.interconnectAttachment = interconnectAttachment return c } @@ -70704,7 +77836,7 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70712,7 +77844,7 @@ func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70720,23 +77852,23 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70744,29 +77876,29 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. +// Do executes the "compute.interconnectAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70797,17 +77929,17 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "description": "Deletes the specified interconnect attachment.", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -70820,20 +77952,20 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { // "$ref": "Operation" // }, @@ -70845,93 +77977,99 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.instances.testIamPermissions": +// method id "compute.interconnectAttachments.get": -type InstancesTestIamPermissionsCall struct { +type InterconnectAttachmentsGetCall struct { s *Service project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest + region string + interconnectAttachment string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
(== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *InterconnectAttachment.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70950,7 +78088,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70962,43 +78100,40 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "description": "Returns the specified interconnect attachment.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -71009,31 +78144,25 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.instances.updateAccessConfig": +// method id "compute.interconnectAttachments.insert": -type InstancesUpdateAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateAccessConfig: Updates the specified access config from an -// instance's network interface with the data included in the request. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. (== suppress_warning http-rest-shadowed -// ==) -func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { - c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. 
+func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.region = region + c.interconnectattachment = interconnectattachment return c } @@ -71051,15 +78180,22 @@ func (r *InstancesService) UpdateAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { +func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71067,36 +78203,36 @@ func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesUpdateAccessConfigCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -71104,21 +78240,20 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateAccessConfig" call. +// Do executes the "compute.interconnectAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71149,33 +78284,25 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.updateAccessConfig", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface where the access config is attached.", - // "location": "query", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -71184,17 +78311,15 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "path": "{project}/regions/{region}/interconnectAttachments", // "request": { - // "$ref": "AccessConfig" + // "$ref": "InterconnectAttachment" // }, // "response": { // "$ref": "Operation" @@ -71207,114 +78332,162 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.updateDisplayDevice": +// method id "compute.interconnectAttachments.list": -type InstancesUpdateDisplayDeviceCall struct { - s *Service - project string - zone string - instance string - displaydevice *DisplayDevice - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdateDisplayDevice: Updates the Display config for a VM instance. -// You can only use this method on a stopped VM instance. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. (== suppress_warning http-rest-shadowed ==) -func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { - c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect attachments contained within +// the specified region. 
+func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.displaydevice = displaydevice + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateDisplayDevice" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71333,7 +78506,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71345,20 +78518,35 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateDisplayDevice", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -71368,57 +78556,70 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", - // "request": { - // "$ref": "DisplayDevice" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.updateNetworkInterface": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesUpdateNetworkInterfaceCall struct { - s *Service - project string - zone string - instance string - networkinterface *NetworkInterface - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnectAttachments.patch": + +type InterconnectAttachmentsPatchCall struct { + s *Service + project string + region string + interconnectAttachment string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateNetworkInterface: Updates an instance's network interface. This -// method follows PATCH semantics. (== suppress_warning -// http-rest-shadowed ==) -func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { - c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect attachment with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. 
+func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { + c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.networkinterface = networkinterface + c.region = region + c.interconnectAttachment = interconnectAttachment + c.interconnectattachment = interconnectattachment return c } @@ -71436,7 +78637,7 @@ func (r *InstancesService) UpdateNetworkInterface(project string, zone string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { +func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71444,7 +78645,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { +func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71452,36 +78653,36 @@ func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { +func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { +func (c *InterconnectAttachmentsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -71489,21 +78690,21 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateNetworkInterface" call. +// Do executes the "compute.interconnectAttachments.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71534,29 +78735,22 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method follows PATCH semantics. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.instances.updateNetworkInterface", + // "id": "compute.interconnectAttachments.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to update.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71564,22 +78758,22 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "request": { - // "$ref": "NetworkInterface" + // "$ref": "InterconnectAttachment" // }, // "response": { // "$ref": "Operation" @@ -71592,115 +78786,98 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.updateShieldedInstanceConfig": +// method id "compute.interconnectLocations.get": -type InstancesUpdateShieldedInstanceConfigCall struct { - s *Service - project string - zone string - instance string - shieldedinstanceconfig *ShieldedInstanceConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdateShieldedInstanceConfig: Updates the Shielded Instance config -// for an instance. You can only use this method on a stopped instance. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. (== suppress_warning http-rest-shadowed -// ==) -func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { - c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the details for the specified interconnect location. +// Gets a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.shieldedinstanceconfig = shieldedinstanceconfig - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { - c.urlParams_.Set("requestId", requestId) + c.interconnectLocation = interconnectLocation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { +func (c *InterconnectLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnectLocation": c.interconnectLocation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateShieldedInstanceConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71719,7 +78896,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71731,17 +78908,16 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Updates the Shielded Instance config for an instance. 
You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateShieldedInstanceConfig", + // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnectLocation" // ], // "parameters": { - // "instance": { - // "description": "Name or id of the instance scoping this request.", + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -71753,38 +78929,24 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", - // "request": { - // "$ref": "ShieldedInstanceConfig" - // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectAttachments.aggregatedList": +// method id "compute.interconnectLocations.list": -type InterconnectAttachmentsAggregatedListCall struct { +type InterconnectLocationsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -71793,10 +78955,10 @@ type InterconnectAttachmentsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of interconnect -// attachments. 
(== suppress_warning http-rest-shadowed ==) -func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { - c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -71805,36 +78967,37 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -71844,22 +79007,23 @@ func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -71867,7 +79031,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71877,7 +79041,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -71885,23 +79049,23 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { +func (c *InterconnectLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71912,7 +79076,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -71925,15 +79089,14 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.aggregatedList" call. -// Exactly one of *InterconnectAttachmentAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71952,7 +79115,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentAggregatedList{ + ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71964,33 +79127,33 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of interconnect locations available to the specified project.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.aggregatedList", + // "id": "compute.interconnectLocations.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -72002,9 +79165,9 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "type": "string" // } // }, - // "path": "{project}/aggregated/interconnectAttachments", + // "path": "{project}/global/interconnectLocations", // "response": { - // "$ref": "InterconnectAttachmentAggregatedList" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72018,7 +79181,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -72036,25 +79199,22 @@ func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f } } -// method id "compute.interconnectAttachments.delete": +// method id "compute.interconnects.delete": -type InterconnectAttachmentsDeleteCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect attachment. (== -// suppress_warning http-rest-shadowed ==) -func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { - c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect. +func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.interconnect = interconnect return c } @@ -72072,7 +79232,7 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72080,7 +79240,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72088,23 +79248,23 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72112,7 +79272,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -72120,21 +79280,20 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.delete" call. +// Do executes the "compute.interconnects.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72165,17 +79324,16 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified interconnect attachment. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified interconnect.", // "httpMethod": "DELETE", - // "id": "compute.interconnectAttachments.delete", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "interconnect" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to delete.", + // "interconnect": { + // "description": "Name of the interconnect to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -72188,20 +79346,13 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { // "$ref": "Operation" // }, @@ -72213,33 +79364,31 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.get": +// method id "compute.interconnects.get": -type InterconnectAttachmentsGetCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect attachment. (== -// suppress_warning http-rest-shadowed ==) -func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { - c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72249,7 +79398,7 @@ func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *Interconn // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -72257,23 +79406,23 @@ func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *Intercon // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetCall) Header() http.Header { +func (c *InterconnectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72284,7 +79433,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -72292,21 +79441,20 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.get" call. -// Exactly one of *InterconnectAttachment or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectAttachment.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Interconnect.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72325,7 +79473,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachment{ + ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72337,17 +79485,16 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.get", + // "id": "compute.interconnects.get", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "interconnect" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to return.", + // "interconnect": { + // "description": "Name of the interconnect to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -72359,18 +79506,169 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Interconnect" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnects.getDiagnostics": + +type InterconnectsGetDiagnosticsCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetDiagnostics: Returns the interconnectDiagnostics for the specified +// interconnect. +func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { + c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.getDiagnostics" call. +// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectsGetDiagnosticsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the interconnectDiagnostics for the specified interconnect.", + // "httpMethod": "GET", + // "id": "compute.interconnects.getDiagnostics", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect resource to query.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", // "response": { - // "$ref": "InterconnectAttachment" + // "$ref": "InterconnectsGetDiagnosticsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72381,26 +79679,23 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } -// method id "compute.interconnectAttachments.insert": +// method id "compute.interconnects.insert": -type InterconnectAttachmentsInsertCall struct { - s *Service - project string - region string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { - c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. 
+func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectattachment = interconnectattachment + c.interconnect = interconnect return c } @@ -72418,7 +79713,7 @@ func (r *InterconnectAttachmentsService) Insert(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72426,7 +79721,7 @@ func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72434,36 +79729,36 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsInsertCall) Header() http.Header { +func (c *InterconnectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -72472,19 +79767,18 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.insert" call. +// Do executes the "compute.interconnects.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72515,12 +79809,11 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a Interconnect in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.insert", + // "id": "compute.interconnects.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -72530,22 +79823,15 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/global/interconnects", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -72558,24 +79844,22 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.list": +// method id "compute.interconnects.list": -type InterconnectAttachmentsListCall struct { +type InterconnectsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of interconnect attachments contained within -// the specified region. (== suppress_warning http-rest-shadowed ==) -func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { - c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -72583,36 +79867,37 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -72622,22 +79907,23 @@ func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *Intercon // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -72645,7 +79931,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72655,7 +79941,7 @@ func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *Intercon // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { c.ifNoneMatch_ = entityTag return c } @@ -72663,23 +79949,23 @@ func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsListCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72690,7 +79976,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -72699,19 +79985,18 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.list" call. -// Exactly one of *InterconnectAttachmentList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectAttachmentList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72730,7 +80015,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentList{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72742,34 +80027,33 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of interconnect available to the specified project.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.list", + // "id": "compute.interconnects.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -72779,18 +80063,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/global/interconnects", // "response": { - // "$ref": "InterconnectAttachmentList" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72804,7 +80081,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -72822,29 +80099,26 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int } } -// method id "compute.interconnectAttachments.patch": +// method id "compute.interconnects.patch": -type InterconnectAttachmentsPatchCall struct { - s *Service - project string - region string - interconnectAttachment string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect attachment with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. (== -// suppress_warning http-rest-shadowed ==) -func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { - c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment - c.interconnectattachment = interconnectattachment + c.interconnect = interconnect + c.interconnect2 = interconnect2 return c } @@ -72862,7 +80136,7 @@ func (r *InterconnectAttachmentsService) Patch(project string, region string, in // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72870,7 +80144,7 @@ func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *Intercon // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72878,36 +80152,36 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsPatchCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -72915,21 +80189,20 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.patch" call. +// Do executes the "compute.interconnects.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72960,17 +80233,16 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.interconnectAttachments.patch", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "interconnect" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to patch.", + // "interconnect": { + // "description": "Name of the interconnect to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -72983,22 +80255,15 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/interconnects/{interconnect}", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -73011,32 +80276,33 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.interconnectLocations.get": +// method id "compute.licenseCodes.get": -type InterconnectLocationsGetCall struct { - s *Service - project string - interconnectLocation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicenseCodesGetCall struct { + s *Service + project string + licenseCode string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the details for the specified interconnect location. -// Gets a list of available interconnect locations by making a list() -// request. (== suppress_warning http-rest-shadowed ==) -func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { - c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. +// Caution This resource is intended for use only by third-party +// partners who are creating Cloud Marketplace images. 
+func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnectLocation = interconnectLocation + c.licenseCode = licenseCode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73046,7 +80312,7 @@ func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *Interconnec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -73054,23 +80320,23 @@ func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *Interconne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsGetCall) Header() http.Header { +func (c *LicenseCodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73081,7 +80347,7 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -73089,20 +80355,20 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnectLocation": c.interconnectLocation, + "project": c.project, + "licenseCode": c.licenseCode, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.get" call. -// Exactly one of *InterconnectLocation or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *InterconnectLocation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LicenseCode.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73121,7 +80387,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocation{ + ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73133,18 +80399,18 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. 
Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "GET", - // "id": "compute.interconnectLocations.get", + // "id": "compute.licenseCodes.get", // "parameterOrder": [ // "project", - // "interconnectLocation" + // "licenseCode" // ], // "parameters": { - // "interconnectLocation": { - // "description": "Name of the interconnect location to return.", + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[0-9]{0,61}?", // "required": true, // "type": "string" // }, @@ -73156,9 +80422,9 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "path": "{project}/global/licenseCodes/{licenseCode}", // "response": { - // "$ref": "InterconnectLocation" + // "$ref": "LicenseCode" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73169,157 +80435,91 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } -// method id "compute.interconnectLocations.list": +// method id "compute.licenseCodes.testIamPermissions": -type InterconnectLocationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicenseCodesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect locations available to the -// specified project. (== suppress_warning http-rest-shadowed ==) -func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { - c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. Caution This resource is intended for use only +// by third-party partners who are creating Cloud Marketplace images. +func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { + c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { +func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { +func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsListCall) Header() http.Header { +func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.list" call. -// Exactly one of *InterconnectLocationList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectLocationList.ServerResponse.Header or (if a response was +// Do executes the "compute.licenseCodes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { +func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73338,7 +80538,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocationList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73350,47 +80550,35 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } return ret, nil // { - // "description": "Retrieves the list of interconnect locations available to the specified project. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.interconnectLocations.list", + // "description": "Returns permissions that a caller has on the specified resource. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", + // "httpMethod": "POST", + // "id": "compute.licenseCodes.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations", + // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InterconnectLocationList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73401,44 +80589,24 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnects.delete": +// method id "compute.licenses.delete": -type InterconnectsDeleteCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesDeleteCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect. (== suppress_warning -// http-rest-shadowed ==) -func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { - c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified license. Caution This resource is +// intended for use only by third-party partners who are creating Cloud +// Marketplace images. +func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.license = license return c } @@ -73456,7 +80624,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73464,7 +80632,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73472,23 +80640,23 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsDeleteCall) Header() http.Header { +func (c *LicensesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73496,7 +80664,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -73504,20 +80672,20 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.delete" call. +// Do executes the "compute.licenses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73548,16 +80716,16 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified license. 
Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "DELETE", - // "id": "compute.interconnects.delete", + // "id": "compute.licenses.delete", // "parameterOrder": [ // "project", - // "interconnect" + // "license" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to delete.", + // "license": { + // "description": "Name of the license resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -73576,7 +80744,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/global/licenses/{license}", // "response": { // "$ref": "Operation" // }, @@ -73588,32 +80756,33 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.get": +// method id "compute.licenses.get": -type InterconnectsGetCall struct { +type LicensesGetCall struct { s *Service project string - interconnect string + license string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { - c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified License resource. Caution This resource +// is intended for use only by third-party partners who are creating +// Cloud Marketplace images. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73623,7 +80792,7 @@ func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -73631,23 +80800,23 @@ func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73658,7 +80827,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -73666,20 +80835,20 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.get" call. -// Exactly one of *Interconnect or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Interconnect.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73698,7 +80867,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Interconnect{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73710,16 +80879,16 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified License resource. 
Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "GET", - // "id": "compute.interconnects.get", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "interconnect" + // "license" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to return.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -73733,9 +80902,9 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/global/licenses/{license}", // "response": { - // "$ref": "Interconnect" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73746,31 +80915,33 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } -// method id "compute.interconnects.getDiagnostics": +// method id "compute.licenses.getIamPolicy": -type InterconnectsGetDiagnosticsCall struct { +type LicensesGetIamPolicyCall struct { s *Service project string - interconnect string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. (== suppress_warning http-rest-shadowed ==) -func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { - c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. Caution This resource is +// intended for use only by third-party partners who are creating Cloud +// Marketplace images. +func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { + c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73780,7 +80951,7 @@ func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *Intercon // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { +func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -73788,23 +80959,23 @@ func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +func (c *LicensesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73815,7 +80986,7 @@ func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -73823,21 +80994,20 @@ func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.getDiagnostics" call. -// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +// Do executes the "compute.licenses.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73856,7 +81026,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectsGetDiagnosticsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73868,32 +81038,32 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "GET", - // "id": "compute.interconnects.getDiagnostics", + // "id": "compute.licenses.getIamPolicy", // "parameterOrder": [ // "project", - // "interconnect" + // "resource" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect resource to query.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", + // "path": "{project}/global/licenses/{resource}/getIamPolicy", // "response": { - // "$ref": "InterconnectsGetDiagnosticsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73904,24 +81074,24 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } -// method id "compute.interconnects.insert": - -type InterconnectsInsertCall struct { - s *Service - project string - interconnect *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.licenses.insert": + +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the -// data included in the request. (== suppress_warning http-rest-shadowed -// ==) -func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { - c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Create a License resource in the specified project. Caution +// This resource is intended for use only by third-party partners who +// are creating Cloud Marketplace images. 
+func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.license = license return c } @@ -73939,7 +81109,7 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73947,7 +81117,7 @@ func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73955,36 +81125,36 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsInsertCall) Header() http.Header { +func (c *LicensesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -73997,14 +81167,14 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.insert" call. +// Do executes the "compute.licenses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74035,9 +81205,9 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Create a License resource in the specified project. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "POST", - // "id": "compute.interconnects.insert", + // "id": "compute.licenses.insert", // "parameterOrder": [ // "project" // ], @@ -74055,24 +81225,27 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/global/licenses", // "request": { - // "$ref": "Interconnect" + // "$ref": "License" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.interconnects.list": +// method id "compute.licenses.list": -type InterconnectsListCall struct { +type LicensesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -74081,10 +81254,16 @@ type InterconnectsListCall struct { header_ http.Header } -// List: Retrieves the list of interconnect available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -func (r *InterconnectsService) List(project string) *InterconnectsListCall { - c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 9. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. Caution This +// resource is intended for use only by third-party partners who are +// creating Cloud Marketplace images. +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -74093,36 +81272,37 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -74132,22 +81312,23 @@ func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListC // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
-func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -74155,7 +81336,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74165,7 +81346,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { c.ifNoneMatch_ = entityTag return c } @@ -74173,23 +81354,23 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsListCall) Header() http.Header { +func (c *LicensesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74200,7 +81381,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -74213,14 +81394,14 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.list" call. -// Exactly one of *InterconnectList or error will be non-nil. 
Any +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InterconnectList.ServerResponse.Header or (if a response was +// *LicensesListResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74239,7 +81420,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectList{ + ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74251,33 +81432,33 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "GET", - // "id": "compute.interconnects.list", + // "id": "compute.licenses.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -74289,9 +81470,9 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/global/licenses", // "response": { - // "$ref": "InterconnectList" + // "$ref": "LicensesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74305,7 +81486,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { +func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -74323,53 +81504,34 @@ func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectL } } -// method id "compute.interconnects.patch": +// method id "compute.licenses.setIamPolicy": -type InterconnectsPatchCall struct { - s *Service - project string - interconnect string - interconnect2 *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. (== suppress_warning -// http-rest-shadowed ==) -func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { - c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. Caution This resource is +// intended for use only by third-party partners who are creating Cloud +// Marketplace images. +func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { + c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - c.interconnect2 = interconnect2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
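// Illustrative usage sketch (not part of the generated diff or the vendored file):
// how a caller might exercise the LicensesService.Insert and List methods whose
// generated plumbing appears above. The project ID, license name, and request ID
// are placeholders, error handling is abbreviated, and the sketch assumes the
// google.golang.org/api/compute/v1 client from this vendor bump plus
// application-default credentials.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Insert a License; RequestId makes the call safe to retry, as described
	// in the doc comment above.
	op, err := svc.Licenses.Insert("my-project", &compute.License{Name: "my-license"}).
		RequestId("3bd5af1e-0c9f-4c3f-9f63-6d9f3f0f7a11"). // placeholder UUID
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insert operation:", op.Name)

	// List licenses in the project; Pages follows nextPageToken until exhausted.
	err = svc.Licenses.List("my-project").MaxResults(100).
		Pages(ctx, func(page *compute.LicensesListResponse) error {
			for _, l := range page.Items {
				fmt.Println(l.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}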
-func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { +func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74377,57 +81539,57 @@ func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { +func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsPatchCall) Header() http.Header { +func (c *LicensesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74446,7 +81608,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74458,21 +81620,14 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.interconnects.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", + // "httpMethod": "POST", + // "id": "compute.licenses.setIamPolicy", // "parameterOrder": [ // "project", - // "interconnect" + // "resource" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -74480,18 +81635,20 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/global/licenses/{resource}/setIamPolicy", // "request": { - // "$ref": "Interconnect" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74501,167 +81658,9 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.licenseCodes.get": - -type LicenseCodesGetCall struct { - s *Service - project string - licenseCode string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Return a specified license code. License codes are mirrored -// across all projects that have permissions to read the License Code. -// (== suppress_warning http-rest-shadowed ==) -func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { - c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.licenseCode = licenseCode - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *LicenseCodesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "licenseCode": c.licenseCode, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.licenseCodes.get" call. -// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LicenseCode.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LicenseCode{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.licenseCodes.get", - // "parameterOrder": [ - // "project", - // "licenseCode" - // ], - // "parameters": { - // "licenseCode": { - // "description": "Number corresponding to the License code resource to return.", - // "location": "path", - // "pattern": "[0-9]{0,61}?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/licenseCodes/{licenseCode}", - // "response": { - // "$ref": "LicenseCode" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.licenseCodes.testIamPermissions": +// method id "compute.licenses.testIamPermissions": -type LicenseCodesTestIamPermissionsCall struct { +type LicensesTestIamPermissionsCall struct { s *Service project string resource string @@ -74672,9 +81671,10 @@ type LicenseCodesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { - c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// specified resource. Caution This resource is intended for use only +// by third-party partners who are creating Cloud Marketplace images. 
+func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { + c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -74684,7 +81684,7 @@ func (r *LicenseCodesService) TestIamPermissions(project string, resource string // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { +func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74692,23 +81692,23 @@ func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Licen // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { +func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { +func (c *LicensesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74721,7 +81721,7 @@ func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Respon reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -74735,14 +81735,14 @@ func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.testIamPermissions" call. +// Do executes the "compute.licenses.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
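// Illustrative sketch (not part of the generated diff): granting a role on a
// License and checking the caller's permissions through the
// compute.licenses.setIamPolicy and compute.licenses.testIamPermissions calls
// shown above. The project, resource name, member, and role are placeholders,
// and `svc` is assumed to be a *compute.Service built as in the earlier sketch.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func licenseIAMExample(ctx context.Context, svc *compute.Service) error {
	// Replace the policy on the license with a single binding.
	policy, err := svc.Licenses.SetIamPolicy("my-project", "my-license", &compute.GlobalSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.viewer",
				Members: []string{"user:partner@example.com"},
			}},
		},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("policy etag:", policy.Etag)

	// Ask which of the listed permissions the caller actually holds.
	resp, err := svc.Licenses.TestIamPermissions("my-project", "my-license", &compute.TestPermissionsRequest{
		Permissions: []string{"compute.licenses.get", "compute.licenses.list"},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions)
	return nil
}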
-func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74773,9 +81773,9 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource. Caution This resource is intended for use only by third-party partners who are creating Cloud Marketplace images.", // "httpMethod": "POST", - // "id": "compute.licenseCodes.testIamPermissions", + // "id": "compute.licenses.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -74796,7 +81796,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", + // "path": "{project}/global/licenses/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -74812,102 +81812,172 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.licenses.delete": +// method id "compute.machineTypes.aggregatedList": -type LicensesDeleteCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified license. (== suppress_warning -// http-rest-shadowed ==) -func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { - c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesDeleteCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.machineTypes.aggregatedList" call. 
+// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74926,7 +81996,7 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74937,20 +82007,40 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error return nil, err } return ret, nil - // { - // "description": "Deletes the specified license. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.licenses.delete", + // { + // "description": "Retrieves an aggregated list of machine types.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "license" + // "project" // ], // "parameters": { - // "license": { - // "description": "Name of the license resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -74959,51 +82049,70 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
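// Illustrative sketch (not part of the generated diff): paging through the
// machine-type aggregated list documented above, assuming the response exposes
// Items as a map keyed by scope (e.g. "zones/us-east1-b") of scoped lists, as
// this generated surface does. The project ID is a placeholder and `svc` is
// assumed to be a *compute.Service built as in the earlier sketch.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listAllMachineTypes(ctx context.Context, svc *compute.Service) error {
	call := svc.MachineTypes.AggregatedList("my-project").
		IncludeAllScopes(false). // only scopes where machine types are expected
		MaxResults(500)
	return call.Pages(ctx, func(page *compute.MachineTypeAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, mt := range scoped.MachineTypes {
				fmt.Printf("%s: %s (%d vCPU, %d MB)\n", scope, mt.Name, mt.GuestCpus, mt.MemoryMb)
			}
		}
		return nil
	})
}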
+func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicensesGetCall struct { +// method id "compute.machineTypes.get": + +type MachineTypesGetCall struct { s *Service project string - license string + zone string + machineType string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified License resource. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. Gets a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.zone = zone + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75013,7 +82122,7 @@ func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -75021,23 +82130,23 @@ func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LicensesGetCall) Header() http.Header { +func (c *MachineTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75048,7 +82157,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -75056,20 +82165,21 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "license": c.license, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75088,7 +82198,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75100,16 +82210,17 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified machine type. 
Gets a list of available machine types by making a list() request.", // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", - // "license" + // "zone", + // "machineType" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", + // "machineType": { + // "description": "Name of the machine type to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -75121,11 +82232,18 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "License" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75136,32 +82254,97 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } -// method id "compute.licenses.getIamPolicy": +// method id "compute.machineTypes.list": -type LicensesGetIamPolicyCall struct { +type MachineTypesListCall struct { s *Service project string - resource string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { - c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75171,7 +82354,7 @@ func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamP // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -75179,23 +82362,23 @@ func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
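The Filter, MaxResults, OrderBy, and PageToken setters documented above are thin URL-parameter helpers; a sketch of driving them by hand follows, with a placeholder project/zone and an illustrative filter value (the Pages helper defined further below performs the same nextPageToken bookkeeping automatically).

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Illustrative filter: machine types with more than 8 vCPUs, sorted by name.
	call := svc.MachineTypes.List("my-project", "us-central1-a").
		Filter("guestCpus > 8").
		OrderBy("name").
		MaxResults(100)

	// Manual paging: re-issue the call with the token from the previous page
	// until nextPageToken comes back empty.
	token := ""
	for {
		if token != "" {
			call.PageToken(token)
		}
		page, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatalf("machineTypes.list: %v", err)
		}
		for _, mt := range page.Items {
			fmt.Println(mt.Name)
		}
		token = page.NextPageToken
		if token == "" {
			break
		}
	}
}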
-func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesGetIamPolicyCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75206,7 +82389,7 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -75214,20 +82397,20 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75246,7 +82429,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75258,14 +82441,37 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of machine types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.licenses.getIamPolicy", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75273,17 +82479,17 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "Policy" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75294,88 +82500,175 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } -// method id "compute.licenses.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicensesInsertCall struct { - s *Service - project string - license *License - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networkEndpointGroups.aggregatedList": + +type NetworkEndpointGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Create a License resource in the specified project. (== -// suppress_warning http-rest-shadowed ==) -func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { - c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. 
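The Pages method added just above wraps exactly that token loop; a sketch of the more idiomatic callback form, with the same placeholder project and zone (returning a non-nil error from the callback halts the iteration early).

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Pages invokes the callback once per page and follows NextPageToken itself.
	err = svc.MachineTypes.List("my-project", "us-central1-a").
		Pages(ctx, func(page *compute.MachineTypeList) error {
			for _, mt := range page.Items {
				fmt.Printf("%s (%d vCPUs)\n", mt.Name, mt.GuestCpus)
			}
			return nil
		})
	if err != nil {
		log.Fatalf("machineTypes.list pages: %v", err)
	}
}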
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. 
+// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesInsertCall) Header() http.Header { +func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -75386,14 +82679,15 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75412,7 +82706,7 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75424,200 +82718,189 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.licenses.insert", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/licenses", - // "request": { - // "$ref": "License" - // }, + // "path": "{project}/aggregated/networkEndpointGroups", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.list": - -type LicensesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// List: Retrieves the list of licenses available in the specified -// project. This method does not get any licenses that belong to other -// projects, including licenses attached to publicly-available images, -// like Debian 9. If you want to get a list of publicly-available -// licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. (== suppress_warning -// http-rest-shadowed ==) -func (r *LicensesService) List(project string) *LicensesListCall { - c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} +// method id "compute.networkEndpointGroups.attachNetworkEndpoints": -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *LicensesListCall) Filter(filter string) *LicensesListCall { - c.urlParams_.Set("filter", filter) - return c +type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
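A sketch of how the attachNetworkEndpoints call and its RequestId setter fit together. The package import matches the earlier sketches; the instance, IP address, port, NEG name, and UUID are placeholders, and real code would generate a fresh UUID per logical request and poll the returned zonal Operation to completion.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Endpoints to add to the NEG; Instance, IpAddress, and Port identify a
	// VM-backed endpoint (placeholder values).
	req := &compute.NetworkEndpointGroupsAttachEndpointsRequest{
		NetworkEndpoints: []*compute.NetworkEndpoint{
			{Instance: "my-instance", IpAddress: "10.0.0.5", Port: 8080},
		},
	}

	op, err := svc.NetworkEndpointGroups.
		AttachNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
		// A stable request ID lets the server deduplicate a retry of the same
		// logical request; use a freshly generated (non-zero) UUID here.
		RequestId("00000000-1111-2222-3333-444444444444").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("attachNetworkEndpoints: %v", err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}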
+func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesListCall) Header() http.Header { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.list" call. -// Exactly one of *LicensesListResponse or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *LicensesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { +// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75636,7 +82919,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicensesListResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75648,105 +82931,102 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.licenses.list", + // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // }, // "response": { - // "$ref": "LicensesListResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.licenses.setIamPolicy": +// method id "compute.networkEndpointGroups.delete": -type LicensesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDeleteCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) -func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { - c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. The network +// endpoints in the NEG and the VM instances they belong to are not +// terminated when the NEG is deleted. Note that the NEG cannot be +// deleted if there are backend services referencing it. +func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { + c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { +func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75754,57 +83034,53 @@ func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { +func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesSetIamPolicyCall) Header() http.Header { +func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networkEndpointGroups.delete" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75823,7 +83099,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75835,14 +83111,21 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.licenses.setIamPolicy", + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + // "httpMethod": "DELETE", + // "id": "compute.networkEndpointGroups.delete", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75850,20 +83133,21 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75873,32 +83157,53 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } -// method id "compute.licenses.testIamPermissions": +// method id "compute.networkEndpointGroups.detachNetworkEndpoints": -type LicensesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { - c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach a list of network endpoints from the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
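Detaching mirrors the attach sketch above; only the request type and method name change. A hedged helper, with all identifiers supplied by the caller, and the same RequestId semantics applied when idempotent retries are wanted:

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// detachEndpoint removes a single VM-backed endpoint from a zonal NEG and
// returns the resulting zone Operation (a sketch; all names are placeholders
// supplied by the caller).
func detachEndpoint(ctx context.Context, svc *compute.Service, project, zone, neg, instance string, port int64, requestID string) (*compute.Operation, error) {
	req := &compute.NetworkEndpointGroupsDetachEndpointsRequest{
		NetworkEndpoints: []*compute.NetworkEndpoint{
			{Instance: instance, Port: port},
		},
	}
	call := svc.NetworkEndpointGroups.
		DetachNetworkEndpoints(project, zone, neg, req).
		Context(ctx)
	if requestID != "" {
		// Optional: a stable, non-zero UUID lets the server ignore duplicate
		// submissions of the same logical request.
		call = call.RequestId(requestID)
	}
	return call.Do()
}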
-func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75906,36 +83211,36 @@ func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesTestIamPermissionsCall) Header() http.Header { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -75943,20 +83248,21 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75975,7 +83281,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75987,14 +83293,21 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Detach a list of network endpoints from the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.licenses.testIamPermissions", + // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76002,117 +83315,60 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.aggregatedList": +// method id "compute.networkEndpointGroups.get": -type MachineTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsGetCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. (== -// suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { + c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
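As a usage sketch only (not part of the vendored diff), the filter syntax documented in the comment above is passed to the generated client as a single string through the call's Filter method. The project name and filter value below are placeholders, and the service is assumed to pick up Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService uses Application Default Credentials unless options say otherwise.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The filter string follows the expression syntax described in the doc comment above.
	agg, err := svc.MachineTypes.AggregatedList("my-project"). // "my-project" is a placeholder
		Filter("guestCpus > 8").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for zone, scoped := range agg.Items {
		fmt.Println(zone, len(scoped.MachineTypes))
	}
}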
-func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76122,7 +83378,7 @@ func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTy // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -76130,23 +83386,23 @@ func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
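A minimal sketch of how a caller might combine the Context and Header hooks described above on one of these generated calls. The project, zone, NEG name, and the extra header are placeholders chosen for illustration.

package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// The context bounds the underlying HTTP request; cancellation aborts it.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.NetworkEndpointGroups.Get("my-project", "us-central1-a", "my-neg") // placeholders
	// Header() exposes the request headers; this extra header is purely illustrative.
	call.Header().Set("X-Goog-Request-Reason", "audit-example")
	neg, err := call.Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(neg.Name, neg.NetworkEndpointType)
}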
-func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *NetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76157,7 +83413,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -76165,19 +83421,21 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76196,46 +83454,31 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of machine types. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.get", + // "parameterOrder": [ + // "project", + // "zone", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -76244,11 +83487,17 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76259,123 +83508,109 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.machineTypes.get": +// method id "compute.networkEndpointGroups.insert": -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsInsertCall struct { + s *Service + project string + zone string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { + c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.machineType = machineType + c.networkendpointgroup = networkendpointgroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
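A hedged sketch of the requestId pattern documented above: the caller generates one UUID for the logical operation and reuses it across retries so the server can ignore duplicate inserts. The github.com/google/uuid dependency, resource names, and retry policy are assumptions for illustration, not part of this diff.

package main

import (
	"context"
	"log"

	"github.com/google/uuid" // assumed available; any RFC 4122 UUID generator works
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	neg := &compute.NetworkEndpointGroup{
		Name:                "example-neg", // placeholder
		NetworkEndpointType: "GCE_VM_IP_PORT",
		Network:             "global/networks/default",
	}

	// One request ID per logical operation: if the first attempt times out,
	// retrying with the same ID lets the server drop the duplicate insert.
	reqID := uuid.New().String()
	for attempt := 0; attempt < 3; attempt++ {
		op, err := svc.NetworkEndpointGroups.
			Insert("my-project", "us-central1-a", neg). // placeholders
			RequestId(reqID).
			Context(ctx).
			Do()
		if err == nil {
			log.Println("operation started:", op.Name)
			return
		}
		log.Println("insert attempt failed, retrying with the same requestId:", err)
	}
}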
+func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { +func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { +func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { +func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// Do executes the "compute.networkEndpointGroups.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { +func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76394,7 +83629,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76406,22 +83641,14 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ // "project", - // "zone", - // "machineType" + // "zone" // ], // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76429,30 +83656,36 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where you want to create the network endpoint group. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "request": { + // "$ref": "NetworkEndpointGroup" + // }, // "response": { - // "$ref": "MachineType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.list": +// method id "compute.networkEndpointGroups.list": -type MachineTypesListCall struct { +type NetworkEndpointGroupsListCall struct { s *Service project string zone string @@ -76462,11 +83695,10 @@ type MachineTypesListCall struct { header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project and zone. +func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { + c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c @@ -76476,36 +83708,37 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -76515,22 +83748,23 @@ func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCal // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -76538,7 +83772,7 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
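A small sketch of the partial-response mechanism referenced above: Fields asks the server to return only the named parts of the response. The field paths and resource names are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Request only the NEG names, sizes, and the paging token.
	list, err := svc.NetworkEndpointGroups.
		List("my-project", "us-central1-a"). // placeholders
		Fields("items(name,size)", "nextPageToken").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, neg := range list.Items {
		fmt.Println(neg.Name, neg.Size)
	}
}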
-func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76548,7 +83782,7 @@ func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -76556,23 +83790,23 @@ func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76583,7 +83817,7 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -76597,14 +83831,14 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
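A sketch of the Do error contract documented above: exactly one of the result or the error is non-nil, and a 304 from a conditional request surfaces as an error that googleapi.IsNotModified recognizes. The cached ETag and resource names are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	const cachedETag = `"example-etag"` // placeholder taken from an earlier response's ETag header

	list, err := svc.NetworkEndpointGroups.
		List("my-project", "us-central1-a"). // placeholders
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		log.Println("cached copy is still current (HTTP 304)")
	case err != nil:
		log.Fatal(err)
	default:
		log.Println("fetched", len(list.Items), "network endpoint groups")
	}
}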
-func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76623,7 +83857,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76635,34 +83869,34 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ // "project", // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -76674,16 +83908,15 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "response": { - // "$ref": "MachineTypeList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76697,7 +83930,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
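A short sketch of the Pages helper documented just above, which drives the pageToken/nextPageToken loop on the caller's behalf; project and zone are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.NetworkEndpointGroups.List("my-project", "us-central1-a").MaxResults(50) // placeholders
	// Pages invokes the callback once per page; returning a non-nil error stops iteration.
	err = call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
		for _, neg := range page.Items {
			fmt.Println(neg.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}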
-func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { +func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -76715,22 +83948,27 @@ func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeLis } } -// method id "compute.networkEndpointGroups.aggregatedList": +// method id "compute.networkEndpointGroups.listNetworkEndpoints": -type NetworkEndpointGroupsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. (== suppress_warning http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { - c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. +func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { + c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest return c } @@ -76738,36 +83976,37 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -76777,22 +84016,23 @@ func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) * // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
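For contrast with the Pages helper, a hedged sketch of paging through a NEG's endpoints by hand using PageToken and NextPageToken, per the comment above. The request body, health-status value, and resource names are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Ask the server to include health status for each endpoint (assumed value "SHOW").
	req := &compute.NetworkEndpointGroupsListEndpointsRequest{HealthStatus: "SHOW"}
	pageToken := ""
	for {
		page, err := svc.NetworkEndpointGroups.
			ListNetworkEndpoints("my-project", "us-central1-a", "my-neg", req). // placeholders
			PageToken(pageToken).
			Context(ctx).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range page.Items {
			fmt.Println(e.NetworkEndpoint.IpAddress, e.NetworkEndpoint.Port)
		}
		if page.NextPageToken == "" {
			break
		}
		pageToken = page.NextPageToken
	}
}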
-func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -76800,73 +84040,68 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.aggregatedList" call. -// Exactly one of *NetworkEndpointGroupAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or // (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. -func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76885,7 +84120,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupAggregatedList{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76897,33 +84132,41 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.aggregatedList", + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "networkEndpointGroup" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -76933,11 +84176,20 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/networkEndpointGroups", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsListEndpointsRequest" + // }, // "response": { - // "$ref": "NetworkEndpointGroupAggregatedList" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76951,7 +84203,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -76969,54 +84221,34 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f f } } -// method id "compute.networkEndpointGroups.attachNetworkEndpoints": +// method id "compute.networkEndpointGroups.testIamPermissions": -type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachNetworkEndpoints: Attach a list of network endpoints to the -// specified network endpoint group. 
(== suppress_warning -// http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { - c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { + c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77024,36 +84256,36 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -77061,21 +84293,21 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77094,7 +84326,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77106,21 +84338,15 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Attach a list of network endpoints to the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "id": "compute.networkEndpointGroups.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "networkEndpointGroup" + // "resource" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -77128,55 +84354,55 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", // "request": { - // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networkEndpointGroups.delete": +// method id "compute.networks.addPeering": -type NetworkEndpointGroupsDeleteCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksAddPeeringCall struct { + s *Service + project string + network string + networksaddpeeringrequest *NetworksAddPeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network endpoint group. The network -// endpoints in the NEG and the VM instances they belong to are not -// terminated when the NEG is deleted. Note that the NEG cannot be -// deleted if there are backend services referencing it. (== -// suppress_warning http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { - c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddPeering: Adds a peering to the specified network. +func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { + c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.network = network + c.networksaddpeeringrequest = networksaddpeeringrequest return c } @@ -77194,7 +84420,7 @@ func (r *NetworkEndpointGroupsService) Delete(project string, zone string, netwo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { +func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77202,7 +84428,7 @@ func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { +func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77210,53 +84436,57 @@ func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { +func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { +func (c *NetworksAddPeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.delete" call. +// Do executes the "compute.networks.addPeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77287,18 +84517,18 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. 
Note that the NEG cannot be deleted if there are backend services referencing it. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.networkEndpointGroups.delete", + // "description": "Adds a peering to the specified network.", + // "httpMethod": "POST", + // "id": "compute.networks.addPeering", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "network" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "network": { + // "description": "Name of the network resource to add peering to.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77313,15 +84543,12 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "{project}/global/networks/{network}/addPeering", + // "request": { + // "$ref": "NetworksAddPeeringRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -77333,28 +84560,23 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.networkEndpointGroups.detachNetworkEndpoints": +// method id "compute.networks.delete": -type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachNetworkEndpoints: Detach a list of network endpoints from the -// specified network endpoint group. (== suppress_warning -// http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { - c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + c.network = network return c } @@ -77372,7 +84594,7 @@ func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77380,7 +84602,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77388,58 +84610,52 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { +func (c *NetworksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +// Do executes the "compute.networks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77470,18 +84686,18 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Detach a list of network endpoints from the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "network" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77496,18 +84712,9 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - // "request": { - // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" - // }, + // "path": "{project}/global/networks/{network}", // "response": { // "$ref": "Operation" // }, @@ -77519,34 +84726,32 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } -// method id "compute.networkEndpointGroups.get": +// method id "compute.networks.get": -type NetworkEndpointGroupsGetCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksGetCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. (== -// suppress_warning http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { - c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network. Gets a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77556,7 +84761,7 @@ func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndp // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -77564,23 +84769,23 @@ func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEnd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEndpointGroupsGetCall) Header() http.Header { +func (c *NetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77591,7 +84796,7 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -77599,21 +84804,20 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.get" call. -// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroup.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77632,7 +84836,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroup{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77644,18 +84848,18 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.get", + // "id": "compute.networks.get", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "network" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77665,17 +84869,11 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -77686,26 +84884,24 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } -// method id "compute.networkEndpointGroups.insert": +// method id "compute.networks.insert": -type NetworkEndpointGroupsInsertCall struct { - s *Service - project string - zone string - networkendpointgroup *NetworkEndpointGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. (== -// suppress_warning http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { - c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkendpointgroup = networkendpointgroup + c.network = network return c } @@ -77723,7 +84919,7 @@ func (r *NetworkEndpointGroupsService) Insert(project string, zone string, netwo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77731,7 +84927,7 @@ func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEn // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77739,36 +84935,36 @@ func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { +func (c *NetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -77777,19 +84973,18 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.insert" call. +// Do executes the "compute.networks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77820,12 +85015,11 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a network in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.insert", + // "id": "compute.networks.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -77839,17 +85033,11 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "path": "{project}/global/networks", // "request": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "Network" // }, // "response": { // "$ref": "Operation" @@ -77862,25 +85050,23 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.networkEndpointGroups.list": +// method id "compute.networks.list": -type NetworkEndpointGroupsListCall struct { +type NetworksListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of network endpoint groups that are located -// in the specified project and zone. (== suppress_warning -// http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { - c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -77888,36 +85074,37 @@ func (r *NetworkEndpointGroupsService) List(project string, zone string) *Networ // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. 
// // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -77927,22 +85114,23 @@ func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEnd // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
-func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -77950,7 +85138,7 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77960,7 +85148,7 @@ func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEnd // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { c.ifNoneMatch_ = entityTag return c } @@ -77968,23 +85156,23 @@ func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsListCall) Header() http.Header { +func (c *NetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77995,7 +85183,7 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -78004,19 +85192,18 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.list" call. -// Exactly one of *NetworkEndpointGroupList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78035,7 +85222,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupList{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78047,34 +85234,33 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of networks available to the specified project.", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.list", + // "id": "compute.networks.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -78084,17 +85270,11 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "path": "{project}/global/networks", // "response": { - // "$ref": "NetworkEndpointGroupList" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78108,7 +85288,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -78126,27 +85306,35 @@ func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*Netwo } } -// method id "compute.networkEndpointGroups.listNetworkEndpoints": +// method id "compute.networks.listPeeringRoutes": -type NetworkEndpointGroupsListNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksListPeeringRoutesCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListNetworkEndpoints: Lists the network endpoints in the specified -// network endpoint group. (== suppress_warning http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { - c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPeeringRoutes: Lists the peering routes exchanged over peering +// connection. +func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { + c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest + c.network = network + return c +} + +// Direction sets the optional parameter "direction": The direction of +// the exchanged routes. 
+// +// Possible values: +// "INCOMING" +// "OUTGOING" +func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("direction", direction) return c } @@ -78154,36 +85342,37 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -78193,91 +85382,112 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults in // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("pageToken", pageToken) return c } +// PeeringName sets the optional parameter "peeringName": The response +// will show routes exchanged over the given peering connection. +func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("peeringName", peeringName) + return c +} + +// Region sets the optional parameter "region": The region of the +// request. The response will include all subnet routes, static routes +// and dynamic routes in the region. +func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("region", region) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
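The new `compute.networks.listPeeringRoutes` method introduced in this hunk takes a network name plus the optional `direction`, `peeringName`, and `region` query parameters described above. A minimal sketch of calling it follows; the `ExchangedPeeringRoutesList.Items` shape and the `DestRange`/`Type` field names are assumptions based on the response type referenced here, and all identifiers are placeholders.

```go
// Hypothetical sketch of the new listPeeringRoutes call; flagged identifiers are assumptions.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listIncomingPeeringRoutes(ctx context.Context, svc *compute.Service) error {
	call := svc.Networks.ListPeeringRoutes("my-project", "my-network").
		Direction("INCOMING").     // one of the documented enum values
		PeeringName("my-peering"). // restrict to a single peering connection
		Region("us-central1").     // include subnet, static, and dynamic routes for this region
		MaxResults(50)

	return call.Pages(ctx, func(page *compute.ExchangedPeeringRoutesList) error {
		for _, r := range page.Items { // Items/DestRange/Type assumed from the generated types
			fmt.Printf("%s (%s)\n", r.DestRange, r.Type)
		}
		return nil
	})
}
```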
+func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { +func (c *NetworksListPeeringRoutesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listPeeringRoutes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. -// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { +// Do executes the "compute.networks.listPeeringRoutes" call. +// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78296,7 +85506,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupsListNetworkEndpoints{ + ret := &ExchangedPeeringRoutesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78308,41 +85518,59 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists the network endpoints in the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.listNetworkEndpoints", + // "description": "Lists the peering routes exchanged over peering connection.", + // "httpMethod": "GET", + // "id": "compute.networks.listPeeringRoutes", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "network" // ], // "parameters": { + // "direction": { + // "description": "The direction of the exchanged routes.", + // "enum": [ + // "INCOMING", + // "OUTGOING" + // ], + // "enumDescriptions": [ + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "network": { + // "description": "Name of the network for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "peeringName": { + // "description": "The response will show routes exchanged over the given peering connection.", // "location": "query", // "type": "string" // }, @@ -78353,19 +85581,15 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, + // "region": { + // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", - // "request": { - // "$ref": "NetworkEndpointGroupsListEndpointsRequest" - // }, + // "path": "{project}/global/networks/{network}/listPeeringRoutes", // "response": { - // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + // "$ref": "ExchangedPeeringRoutesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78379,7 +85603,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { +func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -78397,34 +85621,52 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Contex } } -// method id "compute.networkEndpointGroups.testIamPermissions": +// method id "compute.networks.patch": -type NetworkEndpointGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { - c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified network with the data included in the +// request. Only the following fields can be modified: +// routingConfig.routingMode. 
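This part of the hunk swaps in `compute.networks.patch`, whose description notes that only `routingConfig.routingMode` can be modified. The sketch below shows what such a patch might look like; the `Network.RoutingConfig` and `NetworkRoutingConfig.RoutingMode` field names and the `"GLOBAL"` value are assumptions about the generated structs rather than anything this hunk defines.

```go
// Hypothetical sketch: change a network's dynamic-routing mode via networks.patch.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func setGlobalRouting(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	patch := &compute.Network{
		// RoutingConfig/RoutingMode are assumed field names on the generated Network struct.
		RoutingConfig: &compute.NetworkRoutingConfig{RoutingMode: "GLOBAL"},
	}
	// Per the doc comment above, only routingConfig.routingMode is honoured by this method.
	return svc.Networks.Patch("my-project", "my-network", patch).Context(ctx).Do()
}
```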
+func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.network = network + c.network2 = network2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78432,58 +85674,57 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { +func (c *NetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.networks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78502,7 +85743,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78514,72 +85755,67 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.testIamPermissions", + // "description": "Patches the specified network with the data included in the request. 
Only the following fields can be modified: routingConfig.routingMode.", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "network" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "network": { + // "description": "Name of the network to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "path": "{project}/global/networks/{network}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Network" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.addPeering": +// method id "compute.networks.removePeering": -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksRemovePeeringCall struct { + s *Service + project string + network string + networksremovepeeringrequest *NetworksRemovePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddPeering: Adds a peering to the specified network. (== -// suppress_warning http-rest-shadowed ==) -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemovePeering: Removes a peering from the specified network. 
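The `compute.networks.removePeering` method shown here takes a `NetworksRemovePeeringRequest` body. A minimal, hedged sketch follows; the request's `Name` field (naming the peering to remove) is an assumption about the generated struct, and the project, network, and peering names are placeholders.

```go
// Hypothetical sketch: tear down one side of a VPC peering via networks.removePeering.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func removePeering(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.NetworksRemovePeeringRequest{
		Name: "my-peering", // assumed field: the peering to remove
	}
	return svc.Networks.RemovePeering("my-project", "my-network", req).Context(ctx).Do()
}
```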
+func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { + c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest + c.networksremovepeeringrequest = networksremovepeeringrequest return c } @@ -78597,7 +85833,7 @@ func (r *NetworksService) AddPeering(project string, network string, networksadd // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { +func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78605,7 +85841,7 @@ func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeering // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { +func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78613,36 +85849,36 @@ func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeerin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { +func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { +func (c *NetworksRemovePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -78656,14 +85892,14 @@ func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.addPeering" call. +// Do executes the "compute.networks.removePeering" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78694,16 +85930,16 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network. (== suppress_warning http-rest-shadowed ==)", + // "description": "Removes a peering from the specified network.", // "httpMethod": "POST", - // "id": "compute.networks.addPeering", + // "id": "compute.networks.removePeering", // "parameterOrder": [ // "project", // "network" // ], // "parameters": { // "network": { - // "description": "Name of the network resource to add peering to.", + // "description": "Name of the network resource to remove peering from.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -78722,9 +85958,9 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/addPeering", + // "path": "{project}/global/networks/{network}/removePeering", // "request": { - // "$ref": "NetworksAddPeeringRequest" + // "$ref": "NetworksRemovePeeringRequest" // }, // "response": { // "$ref": "Operation" @@ -78737,9 +85973,9 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.networks.delete": +// method id "compute.networks.switchToCustomMode": -type NetworksDeleteCall struct { +type NetworksSwitchToCustomModeCall struct { s *Service project string network string @@ -78748,11 +85984,10 @@ type NetworksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified network. (== suppress_warning -// http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.network = network return c @@ -78772,7 +86007,7 @@ func (r *NetworksService) Delete(project string, network string) *NetworksDelete // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
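The `requestId` parameter documented above is the idempotency handle for mutating calls such as `switchToCustomMode`: resending the same UUID lets the server ignore a duplicate submission. Below is a sketch of a retry loop built on that behaviour; the fixed UUID literal and the three-attempt policy are illustrative assumptions, not part of the vendored code.

```go
// Hypothetical sketch: retry switchToCustomMode safely by pinning one request ID.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func switchToCustomMode(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	const reqID = "3d7e3bdc-2a4e-4c19-9d2a-6a1f0d6f9b42" // one UUID per logical request (placeholder)

	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		op, err := svc.Networks.SwitchToCustomMode("my-project", "my-network").
			RequestId(reqID). // same ID on every retry, so the server drops duplicates
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
		lastErr = err
	}
	return nil, lastErr
}
```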
-func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78780,7 +86015,7 @@ func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78788,23 +86023,23 @@ func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78812,9 +86047,9 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -78826,14 +86061,14 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. +// Do executes the "compute.networks.switchToCustomMode" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78864,16 +86099,16 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "httpMethod": "POST", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ // "project", // "network" // ], // "parameters": { // "network": { - // "description": "Name of the network to delete.", + // "description": "Name of the network to be updated.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -78892,7 +86127,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/global/networks/{network}/switchToCustomMode", // "response": { // "$ref": "Operation" // }, @@ -78904,80 +86139,92 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.get": +// method id "compute.networks.updatePeering": -type NetworksGetCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksUpdatePeeringCall struct { + s *Service + project string + network string + networksupdatepeeringrequest *NetworksUpdatePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network. Gets a list of available networks -// by making a list() request. (== suppress_warning http-rest-shadowed -// ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePeering: Updates the specified network peering with the data +// included in the request Only the following fields can be modified: +// NetworkPeering.export_custom_routes, and +// NetworkPeering.import_custom_routes +func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { + c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.network = network + c.networksupdatepeeringrequest = networksupdatepeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
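`compute.networks.updatePeering`, added in this hunk, patches an existing peering, and its description says only the custom-route export/import flags can change. The sketch below guesses at how that request is assembled; the `NetworkPeering` field on `NetworksUpdatePeeringRequest` and its `ExportCustomRoutes`/`ImportCustomRoutes` booleans are assumptions about the generated structs.

```go
// Hypothetical sketch: start exporting custom routes over an existing peering.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func exportCustomRoutes(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.NetworksUpdatePeeringRequest{
		// NetworkPeering and its route-exchange flags are assumed field names.
		NetworkPeering: &compute.NetworkPeering{
			Name:               "my-peering",
			ExportCustomRoutes: true,
		},
	}
	return svc.Networks.UpdatePeering("my-project", "my-network", req).Context(ctx).Do()
}
```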
+func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetCall) Header() http.Header { +func (c *NetworksUpdatePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -78989,14 +86236,14 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +// Do executes the "compute.networks.updatePeering" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79015,7 +86262,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Network{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79027,16 +86274,16 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Gets a list of available networks by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.networks.get", + // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes", + // "httpMethod": "PATCH", + // "id": "compute.networks.updatePeering", // "parameterOrder": [ // "project", // "network" // ], // "parameters": { // "network": { - // "description": "Name of the network to return.", + // "description": "Name of the network resource which the updated peering is belonging to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79048,39 +86295,48 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/global/networks/{network}/updatePeering", + // "request": { + // "$ref": "NetworksUpdatePeeringRequest" + // }, // "response": { - // "$ref": "Network" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.insert": +// method id "compute.nodeGroups.addNodes": -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsAddNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddNodes: Adds specified number of nodes to the node group. +func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { + c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest return c } @@ -79098,7 +86354,7 @@ func (r *NetworksService) Insert(project string, network *Network) *NetworksInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { +func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79106,7 +86362,7 @@ func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79114,36 +86370,36 @@ func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksInsertCall) Header() http.Header { +func (c *NodeGroupsAddNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -79151,19 +86407,21 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. +// Do executes the "compute.nodeGroups.addNodes" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79194,13 +86452,22 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Adds specified number of nodes to the node group.", // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "id": "compute.nodeGroups.addNodes", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "nodeGroup" // ], // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -79212,11 +86479,18 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", // "request": { - // "$ref": "Network" + // "$ref": "NodeGroupsAddNodesRequest" // }, // "response": { // "$ref": "Operation" @@ -79229,9 +86503,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.list": +// method id "compute.nodeGroups.aggregatedList": -type NetworksListCall struct { +type NodeGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -79240,11 +86514,10 @@ type NetworksListCall struct { header_ http.Header } -// List: Retrieves the list of networks available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node groups. Note: +// use nodeGroups.listNodes for more details about each group. +func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { + c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -79253,36 +86526,50 @@ func (r *NetworksService) List(project string) *NetworksListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NodeGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -79292,22 +86579,23 @@ func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { // order based on the resource name. 
// // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { +func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { +func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -79315,7 +86603,7 @@ func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79325,7 +86613,7 @@ func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { +func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -79333,23 +86621,23 @@ func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksListCall) Header() http.Header { +func (c *NodeGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79360,7 +86648,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -79373,14 +86661,14 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +// Do executes the "compute.nodeGroups.aggregatedList" call. +// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79399,7 +86687,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &NodeGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79411,33 +86699,38 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", // "httpMethod": "GET", - // "id": "compute.networks.list", + // "id": "compute.nodeGroups.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -79449,9 +86742,9 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/aggregated/nodeGroups", // "response": { - // "$ref": "NetworkList" + // "$ref": "NodeGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79465,7 +86758,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { +func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -79483,27 +86776,24 @@ func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error } } -// method id "compute.networks.patch": +// method id "compute.nodeGroups.delete": -type NetworksPatchCall struct { +type NodeGroupsDeleteCall struct { s *Service project string - network string - network2 *Network + zone string + nodeGroup string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches the specified network with the data included in the -// request. Only the following fields can be modified: -// routingConfig.routingMode. (== suppress_warning http-rest-shadowed -// ==) -func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { - c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified NodeGroup resource. 
+func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { + c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.network2 = network2 + c.zone = zone + c.nodeGroup = nodeGroup return c } @@ -79521,7 +86811,7 @@ func (r *NetworksService) Patch(project string, network string, network2 *Networ // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { +func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79529,7 +86819,7 @@ func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { +func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79537,57 +86827,53 @@ func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { +func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksPatchCall) Header() http.Header { +func (c *NodeGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.patch" call. +// Do executes the "compute.nodeGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79618,16 +86904,17 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.networks.patch", + // "description": "Deletes the specified NodeGroup resource.", + // "httpMethod": "DELETE", + // "id": "compute.nodeGroups.delete", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network to update.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79644,12 +86931,16 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", - // "request": { - // "$ref": "Network" - // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { // "$ref": "Operation" // }, @@ -79661,25 +86952,26 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.networks.removePeering": +// method id "compute.nodeGroups.deleteNodes": -type NetworksRemovePeeringCall struct { +type NodeGroupsDeleteNodesCall struct { s *Service project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest + zone string + nodeGroup string + nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// RemovePeering: Removes a peering from the specified network. 
(== -// suppress_warning http-rest-shadowed ==) -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteNodes: Deletes specified nodes from the node group. +func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { + c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest return c } @@ -79697,7 +86989,7 @@ func (r *NetworksService) RemovePeering(project string, network string, networks // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { +func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79705,7 +86997,7 @@ func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemoveP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { +func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79713,36 +87005,36 @@ func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemove // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { +func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksRemovePeeringCall) Header() http.Header { +func (c *NodeGroupsDeleteNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -79750,20 +87042,21 @@ func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.removePeering" call. +// Do executes the "compute.nodeGroups.deleteNodes" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79794,16 +87087,17 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes specified nodes from the node group.", // "httpMethod": "POST", - // "id": "compute.networks.removePeering", + // "id": "compute.nodeGroups.deleteNodes", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes will be deleted.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79820,11 +87114,18 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/removePeering", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", // "request": { - // "$ref": "NetworksRemovePeeringRequest" + // "$ref": "NodeGroupsDeleteNodesRequest" // }, // "response": { // "$ref": "Operation" @@ -79837,102 +87138,101 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.networks.switchToCustomMode": +// method id "compute.nodeGroups.get": -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsGetCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. (== suppress_warning http-rest-shadowed ==) -func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified NodeGroup. Get a list of available +// NodeGroups by making a list() request. Note: the "nodes" field should +// not be used. Use nodeGroups.listNodes instead. +func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { + c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.nodeGroup = nodeGroup return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { +func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { +func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksSwitchToCustomModeCall) Header() http.Header { +func (c *NodeGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.switchToCustomMode" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeGroups.get" call. +// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NodeGroup.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79951,7 +87251,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79963,16 +87263,17 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", + // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.get", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", + // "nodeGroup": { + // "description": "Name of the node group to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79985,130 +87286,121 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/switchToCustomMode", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "NodeGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.updatePeering": +// method id "compute.nodeGroups.getIamPolicy": -type NetworksUpdatePeeringCall struct { - s *Service - project string - network string - networksupdatepeeringrequest *NetworksUpdatePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdatePeering: Updates the specified network peering with the data -// included in the request Only the following fields can be modified: -// NetworkPeering.export_custom_routes, and -// NetworkPeering.import_custom_routes (== suppress_warning -// http-rest-shadowed ==) -func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { - c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { + c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksupdatepeeringrequest = networksupdatepeeringrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { +func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { +func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksUpdatePeeringCall) Header() http.Header { +func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.updatePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80127,7 +87419,7 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80139,70 +87431,70 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PATCH", - // "id": "compute.networks.updatePeering", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.getIamPolicy", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource which the updated peering is belonging to.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/updatePeering", - // "request": { - // "$ref": "NetworksUpdatePeeringRequest" - // }, + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.addNodes": +// method id "compute.nodeGroups.insert": -type NodeGroupsAddNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsInsertCall struct { + s *Service + project string + zone string + nodegroup *NodeGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddNodes: Adds specified number of nodes to the node group. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { - c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a NodeGroup resource in the specified project using +// the data included in the request. +func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { + c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest + c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) + c.nodegroup = nodegroup return c } @@ -80220,7 +87512,7 @@ func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { +func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80228,7 +87520,7 @@ func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodes // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { +func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80236,36 +87528,36 @@ func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNode // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { +func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsAddNodesCall) Header() http.Header { +func (c *NodeGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -80273,21 +87565,20 @@ func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.addNodes" call. +// Do executes the "compute.nodeGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80318,21 +87609,21 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds specified number of nodes to the node group. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.addNodes", + // "id": "compute.nodeGroups.insert", // "parameterOrder": [ // "project", // "zone", - // "nodeGroup" + // "initialNodeCount" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "initialNodeCount": { + // "description": "Initial count of nodes in the node group.", + // "format": "int32", + // "location": "query", // "required": true, - // "type": "string" + // "type": "integer" // }, // "project": { // "description": "Project ID for this request.", @@ -80354,9 +87645,9 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + // "path": "{project}/zones/{zone}/nodeGroups", // "request": { - // "$ref": "NodeGroupsAddNodesRequest" + // "$ref": "NodeGroup" // }, // "response": { // "$ref": "Operation" @@ -80369,23 +87660,25 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.nodeGroups.aggregatedList": +// method id "compute.nodeGroups.list": -type NodeGroupsAggregatedListCall struct { +type NodeGroupsListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node groups. Note: -// use nodeGroups.listNodes for more details about each group. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { - c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of node groups available to the specified +// project. Note: use nodeGroups.listNodes for more details about each +// group. +func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { + c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -80393,36 +87686,37 @@ func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregated // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. 
// // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -80432,22 +87726,23 @@ func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsA // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { +func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { +func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -80455,7 +87750,7 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { +func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80465,7 +87760,7 @@ func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsA // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { +func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -80473,23 +87768,23 @@ func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroups // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { +func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsAggregatedListCall) Header() http.Header { +func (c *NodeGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80500,7 +87795,7 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -80509,18 +87804,19 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.aggregatedList" call. -// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.nodeGroups.list" call. 
+// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeGroupList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { +func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80539,7 +87835,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupAggregatedList{ + ret := &NodeGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80551,33 +87847,34 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr } return ret, nil // { - // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.aggregatedList", + // "id": "compute.nodeGroups.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -80587,11 +87884,18 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeGroups", + // "path": "{project}/zones/{zone}/nodeGroups", // "response": { - // "$ref": "NodeGroupAggregatedList" + // "$ref": "NodeGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80605,7 +87909,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { +func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -80623,9 +87927,9 @@ func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGr } } -// method id "compute.nodeGroups.delete": +// method id "compute.nodeGroups.listNodes": -type NodeGroupsDeleteCall struct { +type NodeGroupsListNodesCall struct { s *Service project string zone string @@ -80635,39 +87939,84 @@ type NodeGroupsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified NodeGroup resource. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { - c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNodes: Lists nodes in the node group. +func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { + c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.nodeGroup = nodeGroup return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { +func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80675,23 +88024,23 @@ func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCal // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { +func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteCall) Header() http.Header { +func (c *NodeGroupsListNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80699,9 +88048,9 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -80714,14 +88063,14 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.listNodes" call. +// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeGroupsListNodes.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80740,7 +88089,7 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeGroupsListNodes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80752,22 +88101,45 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified NodeGroup resource. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.nodeGroups.delete", + // "description": "Lists nodes in the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.listNodes", // "parameterOrder": [ // "project", // "zone", // "nodeGroup" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "description": "Name of the NodeGroup resource whose nodes you want to list.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -80775,11 +88147,6 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -80788,39 +88155,60 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", // "response": { - // "$ref": "Operation" + // "$ref": "NodeGroupsListNodes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.deleteNodes": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeGroupsDeleteNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.nodeGroups.patch": + +type NodeGroupsPatchCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroup *NodeGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteNodes: Deletes specified nodes from the node group. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { - c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patch the node group. 
+func (r *NodeGroupsService) Patch(project string, zone string, nodeGroup string, nodegroup *NodeGroup) *NodeGroupsPatchCall { + c := &NodeGroupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.nodeGroup = nodeGroup - c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest + c.nodegroup = nodegroup return c } @@ -80838,7 +88226,7 @@ func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { +func (c *NodeGroupsPatchCall) RequestId(requestId string) *NodeGroupsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80846,7 +88234,7 @@ func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDelet // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { +func (c *NodeGroupsPatchCall) Fields(s ...googleapi.Field) *NodeGroupsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80854,38 +88242,38 @@ func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDele // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { +func (c *NodeGroupsPatchCall) Context(ctx context.Context) *NodeGroupsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteNodesCall) Header() http.Header { +func (c *NodeGroupsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -80898,14 +88286,14 @@ func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.deleteNodes" call. +// Do executes the "compute.nodeGroups.patch" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80936,9 +88324,9 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes specified nodes from the node group. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.deleteNodes", + // "description": "Patch the node group.", + // "httpMethod": "PATCH", + // "id": "compute.nodeGroups.patch", // "parameterOrder": [ // "project", // "zone", @@ -80946,7 +88334,7 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation // ], // "parameters": { // "nodeGroup": { - // "description": "Name of the NodeGroup resource whose nodes will be deleted.", + // "description": "Name of the NodeGroup resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -80972,9 +88360,9 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "request": { - // "$ref": "NodeGroupsDeleteNodesRequest" + // "$ref": "NodeGroup" // }, // "response": { // "$ref": "Operation" @@ -80987,102 +88375,93 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.nodeGroups.get": +// method id "compute.nodeGroups.setIamPolicy": -type NodeGroupsGetCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeGroupsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified NodeGroup. Get a list of available -// NodeGroups by making a list() request. Note: the "nodes" field should -// not be used. Use nodeGroups.listNodes instead. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { - c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { + c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.nodeGroup = nodeGroup + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { +func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { +func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsGetCall) Header() http.Header { +func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.get" call. -// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeGroup.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { +// Do executes the "compute.nodeGroups.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81101,7 +88480,7 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroup{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81113,26 +88492,26 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) } return ret, nil // { - // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.get", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.setIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "nodeGroup" + // "resource" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the node group to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -81144,114 +88523,126 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "NodeGroup" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeGroups.getIamPolicy": +// method id "compute.nodeGroups.setNodeTemplate": -type NodeGroupsGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeGroupsSetNodeTemplateCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest + urlParams_ gensupport.URLParams + 
ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { - c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNodeTemplate: Updates the node template of the node group. +func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { + c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource + c.nodeGroup = nodeGroup + c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { +func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { +func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
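A minimal usage sketch of the SetNodeTemplate call builder and its RequestId parameter documented above. It assumes the imports "context", "fmt" and compute "google.golang.org/api/compute/v1", that this vendored version exposes compute.NewService, and that NodeGroupsSetNodeTemplateRequest carries the template URL in a NodeTemplate field; the project, zone, group and template names are placeholders.

// switchNodeTemplate points an existing node group at a different node
// template, retrying safely by reusing the same request ID.
func switchNodeTemplate(ctx context.Context) error {
	svc, err := compute.NewService(ctx) // Application Default Credentials; assumed constructor in this package version
	if err != nil {
		return err
	}
	req := &compute.NodeGroupsSetNodeTemplateRequest{
		// Full or partial URL of the target template (assumed field name).
		NodeTemplate: "projects/my-project/regions/us-central1/nodeTemplates/my-template",
	}
	op, err := svc.NodeGroups.SetNodeTemplate("my-project", "us-central1-a", "my-node-group", req).
		RequestId("5f2b6c1e-8d7a-4b5e-9c3f-1a2b3c4d5e6f"). // any non-zero UUID; reuse the same value on retries
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The change runs asynchronously; op.Name identifies the zonal operation to poll.
	fmt.Printf("setNodeTemplate operation: %s (%s)\n", op.Name, op.Status)
	return nil
}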
-func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { +func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.nodeGroups.setNodeTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81270,7 +88661,7 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81282,15 +88673,22 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.getIamPolicy", + // "description": "Updates the node template of the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.setNodeTemplate", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "nodeGroup" // ], // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81298,11 +88696,9 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -81313,66 +88709,49 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + // "request": { + // "$ref": "NodeGroupsSetNodeTemplateRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeGroups.insert": +// method id "compute.nodeGroups.testIamPermissions": -type NodeGroupsInsertCall struct { - s *Service - project string - zone string - nodegroup *NodeGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a NodeGroup resource in the specified project using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { - c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { + c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) - c.nodegroup = nodegroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { +func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81380,36 +88759,36 @@ func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { +func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
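The TestIamPermissions call added here follows the same builder shape. A sketch under the same assumptions as the previous example; the permission strings are illustrative, and the response is expected to echo back only the permissions the caller actually holds.

// checkNodeGroupAccess reports which of the requested permissions the caller
// has on a node group.
func checkNodeGroupAccess(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.nodeGroups.get", "compute.nodeGroups.setIamPolicy"}, // illustrative permission names
	}
	resp, err := svc.NodeGroups.TestIamPermissions("my-project", "us-central1-a", "my-node-group", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Only the granted subset of the requested permissions is returned.
	for _, p := range resp.Permissions {
		fmt.Println("granted:", p)
	}
	return nil
}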
-func (c *NodeGroupsInsertCall) Header() http.Header { +func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -81417,20 +88796,21 @@ func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81449,7 +88829,7 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81461,22 +88841,15 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a NodeGroup resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.insert", + // "id": "compute.nodeGroups.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "initialNodeCount" + // "resource" // ], // "parameters": { - // "initialNodeCount": { - // "description": "Initial count of nodes in the node group.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81484,9 +88857,11 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -81497,40 +88872,37 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups", + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", // "request": { - // "$ref": "NodeGroup" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.list": +// method id "compute.nodeTemplates.aggregatedList": -type NodeGroupsListCall struct { +type NodeTemplatesAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node groups available to the specified -// project. Note: use nodeGroups.listNodes for more details about each -// group. (== suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { - c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node templates. 
+func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { + c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -81538,36 +88910,50 @@ func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCal // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NodeTemplatesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. 
Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -81577,22 +88963,23 @@ func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { +func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { +func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -81600,7 +88987,7 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { +func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81610,7 +88997,7 @@ func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { +func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -81618,23 +89005,23 @@ func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
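Combining the optional parameters documented above, a sketch; the filter value, node type and project are placeholders, and svc is the same assumed *compute.Service as in the earlier examples.

// newestTemplatesFirst fetches one page of the aggregated node-template
// listing, filtered on a field, sorted newest-first and capped at 50 results.
func newestTemplatesFirst(ctx context.Context, svc *compute.Service) (*compute.NodeTemplateAggregatedList, error) {
	return svc.NodeTemplates.AggregatedList("my-project").
		Filter("nodeType = n1-node-96-624"). // simple comparison, per the filter grammar above
		OrderBy("creationTimestamp desc").   // reverse-chronological order
		MaxResults(50).                      // acceptable values are 0 to 500
		Context(ctx).
		Do()
}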
-func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { +func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsListCall) Header() http.Header { +func (c *NodeTemplatesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81645,7 +89032,7 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -81654,19 +89041,18 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.list" call. -// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeGroupList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.nodeTemplates.aggregatedList" call. +// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { +func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81685,7 +89071,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupList{ + ret := &NodeTemplateAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81697,34 +89083,38 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e } return ret, nil // { - // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of node templates.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.list", + // "id": "compute.nodeTemplates.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -81734,18 +89124,11 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups", + // "path": "{project}/aggregated/nodeTemplates", // "response": { - // "$ref": "NodeGroupList" + // "$ref": "NodeTemplateAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81759,7 +89142,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
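The Pages helper described here drives the pageToken/nextPageToken loop so callers do not have to. A sketch under the same assumptions as before; treat the Items map and its NodeTemplates field as assumptions about the generated aggregated-list schema.

// listAllTemplates walks every page of the aggregated listing and prints each
// node template grouped by its scope key (for example "regions/us-central1").
func listAllTemplates(ctx context.Context, svc *compute.Service) error {
	return svc.NodeTemplates.AggregatedList("my-project").Pages(ctx,
		func(page *compute.NodeTemplateAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, tpl := range scoped.NodeTemplates {
					fmt.Printf("%s: %s (%s)\n", scope, tpl.Name, tpl.NodeType)
				}
			}
			return nil // a non-nil error here halts the iteration
		})
}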
-func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { +func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -81777,149 +89160,276 @@ func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) e } } -// method id "compute.nodeGroups.listNodes": +// method id "compute.nodeTemplates.delete": -type NodeGroupsListNodesCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesDeleteCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListNodes: Lists nodes in the node group. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { - c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified NodeTemplate resource. +func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { + c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup + c.region = region + c.nodeTemplate = nodeTemplate return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified NodeTemplate resource.", + // "httpMethod": "DELETE", + // "id": "compute.nodeTemplates.delete", + // "parameterOrder": [ + // "project", + // "region", + // "nodeTemplate" + // ], + // "parameters": { + // "nodeTemplate": { + // "description": "Name of the NodeTemplate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeTemplates.get": + +type NodeTemplatesGetCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified node template. Gets a list of available +// node templates by making a list() request. +func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { + c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.nodeTemplate = nodeTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { +func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { +func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsListNodesCall) Header() http.Header { +func (c *NodeTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.listNodes" call. -// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupsListNodes.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { +// Do executes the "compute.nodeTemplates.get" call. +// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81938,7 +89448,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupsListNodes{ + ret := &NodeTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81950,45 +89460,22 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL } return ret, nil // { - // "description": "Lists nodes in the node group. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.listNodes", + // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.get", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region", + // "nodeTemplate" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "nodeGroup": { - // "description": "Name of the NodeGroup resource whose nodes you want to list.", + // "nodeTemplate": { + // "description": "Name of the node template to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81996,17 +89483,17 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "response": { - // "$ref": "NodeGroupsListNodes" + // "$ref": "NodeTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82017,115 +89504,100 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
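The IfNoneMatch setter added to the Get call above enables conditional reads. A sketch of ETag-based caching, assuming the ETag was captured from an earlier response's headers and that googleapi is "google.golang.org/api/googleapi"; names are placeholders.

// refreshTemplate re-fetches a node template only if it has changed since the
// copy identified by savedETag; otherwise the cached copy is returned as-is.
func refreshTemplate(ctx context.Context, svc *compute.Service, cached *compute.NodeTemplate, savedETag string) (*compute.NodeTemplate, error) {
	tpl, err := svc.NodeTemplates.Get("my-project", "us-central1", cached.Name).
		IfNoneMatch(savedETag). // server answers 304 Not Modified if the ETag still matches
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // unchanged on the server; keep the cached template
	}
	if err != nil {
		return nil, err
	}
	return tpl, nil
}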
-func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeGroups.setIamPolicy": +// method id "compute.nodeTemplates.getIamPolicy": -type NodeGroupsSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { - c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { + c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { +func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { +func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
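Together, getIamPolicy and setIamPolicy support the usual read-modify-write flow: fetch the current Policy (whose Etag guards against concurrent edits), adjust the bindings, and write it back inside a ZoneSetPolicyRequest. A sketch with placeholder names and an illustrative role; the Policy and Binding field names are assumed from this package's schema.

// grantNodeGroupViewer appends a read-only binding to a node group's policy.
func grantNodeGroupViewer(ctx context.Context, svc *compute.Service, member string) error {
	policy, err := svc.NodeGroups.GetIamPolicy("my-project", "us-central1-a", "my-node-group").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer", // illustrative role
		Members: []string{member},       // e.g. "user:alice@example.com"
	})
	// Writing the fetched policy back preserves its Etag, so a concurrent
	// modification causes this call to fail rather than be overwritten.
	_, err = svc.NodeGroups.SetIamPolicy("my-project", "us-central1-a", "my-node-group",
		&compute.ZoneSetPolicyRequest{Policy: policy}).Context(ctx).Do()
	return err
}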
-func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { +func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setIamPolicy" call. +// Do executes the "compute.nodeTemplates.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82156,12 +89628,12 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.setIamPolicy", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", + // "region", // "resource" // ], // "parameters": { @@ -82172,57 +89644,53 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.setNodeTemplate": +// method id "compute.nodeTemplates.insert": -type NodeGroupsSetNodeTemplateCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesInsertCall struct { + s *Service + project string + region string + nodetemplate *NodeTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNodeTemplate: Updates the node template of the node group. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { - c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a NodeTemplate resource in the specified project +// using the data included in the request. +func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { + c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest + c.region = region + c.nodetemplate = nodetemplate return c } @@ -82240,7 +89708,7 @@ func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGro // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { +func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -82248,7 +89716,7 @@ func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { +func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82256,36 +89724,36 @@ func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroups // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { +func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { +func (c *NodeTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -82293,21 +89761,20 @@ func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setNodeTemplate" call. +// Do executes the "compute.nodeTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82338,26 +89805,25 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Updates the node template of the node group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.setNodeTemplate", + // "id": "compute.nodeTemplates.insert", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to update.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -82365,18 +89831,11 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + // "path": "{project}/regions/{region}/nodeTemplates", // "request": { - // "$ref": "NodeGroupsSetNodeTemplateRequest" + // "$ref": "NodeTemplate" // }, // "response": { // "$ref": "Operation" @@ -82389,93 +89848,162 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.nodeGroups.testIamPermissions": +// method id "compute.nodeTemplates.list": -type NodeGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { - c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of node templates available to the specified +// project. +func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { + c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { +func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { +func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { +func (c *NodeTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.nodeTemplates.list" call. +// Exactly one of *NodeTemplateList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *NodeTemplateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82494,7 +90022,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &NodeTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82506,15 +90034,37 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.testIamPermissions", + // "description": "Retrieves a list of node templates available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.list", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82522,27 +90072,17 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/nodeTemplates", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "NodeTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82553,157 +90093,114 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } -// method id "compute.nodeTemplates.aggregatedList": - -type NodeTemplatesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of node templates. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { - c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} +// method id "compute.nodeTemplates.setIamPolicy": -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +type NodeTemplatesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { + c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { +func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { +func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesAggregatedListCall) Header() http.Header { +func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.aggregatedList" call. -// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { +// Do executes the "compute.nodeTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82722,7 +90219,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplateAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82734,123 +90231,80 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod } return ret, nil // { - // "description": "Retrieves an aggregated list of node templates. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.aggregatedList", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.setIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeTemplates", + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, // "response": { - // "$ref": "NodeTemplateAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeTemplates.delete": +// method id "compute.nodeTemplates.testIamPermissions": -type NodeTemplatesDeleteCall struct { - s *Service - project string - region string - nodeTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeTemplate resource. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { - c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { + c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.nodeTemplate = nodeTemplate - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { +func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82858,53 +90312,58 @@ func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { +func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesDeleteCall) Header() http.Header { +func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82923,7 +90382,7 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82935,22 +90394,15 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified NodeTemplate resource. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "DELETE", - // "id": "compute.nodeTemplates.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "nodeTemplate" + // "resource" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the NodeTemplate resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82965,52 +90417,130 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeTemplates.get": +// method id "compute.nodeTypes.aggregatedList": -type NodeTemplatesGetCall struct { +type NodeTypesAggregatedListCall struct { s *Service project string - region string - nodeTemplate string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { - c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node types. +func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { + c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodeTemplate = nodeTemplate + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NodeTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { +func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83020,7 +90550,7 @@ func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { +func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -83028,23 +90558,23 @@ func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { +func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesGetCall) Header() http.Header { +func (c *NodeTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83055,7 +90585,7 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -83063,21 +90593,19 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.get" call. -// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeTemplate.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { +// Do executes the "compute.nodeTypes.aggregatedList" call. +// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83096,7 +90624,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplate{ + ret := &NodeTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83108,20 +90636,39 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of node types.", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.get", + // "id": "compute.nodeTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "nodeTemplate" + // "project" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the node template to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -83130,18 +90677,11 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "{project}/aggregated/nodeTypes", // "response": { - // "$ref": "NodeTemplate" + // "$ref": "NodeTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -83152,34 +90692,54 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } -// method id "compute.nodeTemplates.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeTemplatesGetIamPolicyCall struct { +// method id "compute.nodeTypes.get": + +type NodeTypesGetCall struct { s *Service project string - region string - resource string + zone string + nodeType string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { - c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified node type. Gets a list of available node +// types by making a list() request. 
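// A minimal usage sketch, not generated code: paging through
// compute.nodeTypes.aggregatedList with the Pages helper defined above,
// which follows nextPageToken automatically. The project ID is a
// placeholder, credentials come from Application Default Credentials,
// and the Items / NodeTypes field names are assumed to follow the
// generated NodeTypeAggregatedList and NodeTypesScopedList types.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	call := svc.NodeTypes.AggregatedList("my-project").MaxResults(500)
	err = call.Pages(ctx, func(page *compute.NodeTypeAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, nt := range scoped.NodeTypes {
				fmt.Printf("%s: %s (%d vCPUs)\n", scope, nt.Name, nt.GuestCpus)
			}
		}
		return nil // a non-nil error here would stop the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}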
+func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { + c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource + c.zone = zone + c.nodeType = nodeType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { +func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83189,7 +90749,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTempla // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { +func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -83197,23 +90757,23 @@ func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTempl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { +func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { +func (c *NodeTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83224,7 +90784,7 @@ func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -83233,20 +90793,20 @@ func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "resource": c.resource, + "zone": c.zone, + "nodeType": c.nodeType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.nodeTypes.get" call. +// Exactly one of *NodeType or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// *NodeType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83265,7 +90825,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &NodeType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83277,40 +90837,40 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.getIamPolicy", + // "id": "compute.nodeTypes.get", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "nodeType" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "nodeType": { + // "description": "Name of the node type to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", // "response": { - // "$ref": "Policy" + // "$ref": "NodeType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -83321,110 +90881,162 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.nodeTemplates.insert": +// method id "compute.nodeTypes.list": -type NodeTemplatesInsertCall struct { +type NodeTypesListCall struct { s *Service project string - region 
string - nodetemplate *NodeTemplate + zone string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Insert: Creates a NodeTemplate resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { - c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of node types available to the specified +// project. +func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { + c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodetemplate = nodetemplate + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { +func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { +func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
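// A minimal sketch, not generated code: a filtered zonal nodeTypes.list
// call that captures the response ETag and replays the request with
// IfNoneMatch, treating 304 Not Modified as "nothing changed". It assumes
// the server returns an ETag header for this call; the project, zone, and
// filter value are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	const project, zone = "my-project", "us-central1-a"

	first, err := svc.NodeTypes.List(project, zone).
		Filter("name != c2-node-60-240"). // simple `field != value` form from the docs above
		OrderBy("name").
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := first.Header.Get("Etag") // promoted from the embedded googleapi.ServerResponse

	// Re-issue the same call conditionally; IsNotModified distinguishes a
	// 304 response from a real error.
	second, err := svc.NodeTypes.List(project, zone).
		Filter("name != c2-node-60-240").
		OrderBy("name").
		MaxResults(100).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("node type list unchanged since the first request")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Printf("list changed: %d node types in the first page\n", len(second.Items))
	}
}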
-func (c *NodeTemplatesInsertCall) Header() http.Header { +func (c *NodeTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeTypes.list" call. +// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NodeTypeList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83443,7 +91055,7 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83455,14 +91067,37 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.nodeTemplates.insert", + // "description": "Retrieves a list of node types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.list", // "parameterOrder": [ // "project", - // "region" + // "zone" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83470,52 +91105,63 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates", - // "request": { - // "$ref": "NodeTemplate" - // }, + // "path": "{project}/zones/{zone}/nodeTypes", // "response": { - // "$ref": "Operation" + // "$ref": "NodeTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeTemplates.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeTemplatesListCall struct { +// method id "compute.packetMirrorings.aggregatedList": + +type PacketMirroringsAggregatedListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node templates available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { - c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of packetMirrorings. +func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirroringsAggregatedListCall { + c := &PacketMirroringsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -83523,36 +91169,50 @@ func (r *NodeTemplatesService) List(project string, region string) *NodeTemplate // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *PacketMirroringsAggregatedListCall) Filter(filter string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *PacketMirroringsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PacketMirroringsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *PacketMirroringsAggregatedListCall) MaxResults(maxResults int64) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -83562,22 +91222,23 @@ func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListC // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
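// A minimal sketch, not generated code: an aggregated packetMirrorings
// list using the parenthesized filter grammar documented above (two
// parenthesized expressions are ANDed by default; OR must be written out),
// together with IncludeAllScopes and OrderBy. The project and resource
// names are placeholders, and the PacketMirrorings field on the scoped
// list is assumed to follow the generated naming convention.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Explicit OR between two name checks, as in the documented
	// `(expr) OR (expr)` form.
	filter := `(name = mirror-a) OR (name = mirror-b)`

	resp, err := svc.PacketMirrorings.AggregatedList("my-project").
		Filter(filter).
		IncludeAllScopes(true). // include every visible scope, even where nothing is expected
		OrderBy("name").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for scope, scoped := range resp.Items {
		for _, pm := range scoped.PacketMirrorings {
			fmt.Printf("%s: %s\n", scope, pm.Name)
		}
	}
	if resp.NextPageToken != "" {
		fmt.Println("more results available; pass NextPageToken to PageToken or use Pages")
	}
}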
-func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { +func (c *PacketMirroringsAggregatedListCall) OrderBy(orderBy string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { +func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -83585,7 +91246,7 @@ func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { +func (c *PacketMirroringsAggregatedListCall) Fields(s ...googleapi.Field) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83595,7 +91256,7 @@ func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { +func (c *PacketMirroringsAggregatedListCall) IfNoneMatch(entityTag string) *PacketMirroringsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -83603,23 +91264,23 @@ func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { +func (c *PacketMirroringsAggregatedListCall) Context(ctx context.Context) *PacketMirroringsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesListCall) Header() http.Header { +func (c *PacketMirroringsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83630,7 +91291,7 @@ func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/packetMirrorings") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -83639,19 +91300,18 @@ func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.list" call. -// Exactly one of *NodeTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.packetMirrorings.aggregatedList" call. +// Exactly one of *PacketMirroringAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *PacketMirroringAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { +func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (*PacketMirroringAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83670,7 +91330,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplateList{ + ret := &PacketMirroringAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83682,34 +91342,38 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL } return ret, nil // { - // "description": "Retrieves a list of node templates available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of packetMirrorings.", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.list", + // "id": "compute.packetMirrorings.aggregatedList", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -83719,18 +91383,11 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates", + // "path": "{project}/aggregated/packetMirrorings", // "response": { - // "$ref": "NodeTemplateList" + // "$ref": "PacketMirroringAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -83744,7 +91401,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { +func (c *PacketMirroringsAggregatedListCall) Pages(ctx context.Context, f func(*PacketMirroringAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -83762,35 +91419,50 @@ func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateL } } -// method id "compute.nodeTemplates.setIamPolicy": +// method id "compute.packetMirrorings.delete": -type NodeTemplatesSetIamPolicyCall struct { - s *Service - project string - region string - resource string - regionsetpolicyrequest *RegionSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PacketMirroringsDeleteCall struct { + s *Service + project string + region string + packetMirroring string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { - c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified PacketMirroring resource. 
+func (r *PacketMirroringsService) Delete(project string, region string, packetMirroring string) *PacketMirroringsDeleteCall { + c := &PacketMirroringsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.packetMirroring = packetMirroring + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *PacketMirroringsDeleteCall) RequestId(requestId string) *PacketMirroringsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { +func (c *PacketMirroringsDeleteCall) Fields(s ...googleapi.Field) *PacketMirroringsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83798,58 +91470,53 @@ func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTempla // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { +func (c *PacketMirroringsDeleteCall) Context(ctx context.Context) *PacketMirroringsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { +func (c *PacketMirroringsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/packetMirrorings/{packetMirroring}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.packetMirrorings.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83868,7 +91535,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83880,15 +91547,22 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "POST", - // "id": "compute.nodeTemplates.setIamPolicy", + // "description": "Deletes the specified PacketMirroring resource.", + // "httpMethod": "DELETE", + // "id": "compute.packetMirrorings.delete", // "parameterOrder": [ // "project", // "region", - // "resource" + // "packetMirroring" // ], // "parameters": { + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83897,63 +91571,242 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.packetMirrorings.get": + +type PacketMirroringsGetCall struct { + s *Service + project string + region string + packetMirroring string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified PacketMirroring resource. +func (r *PacketMirroringsService) Get(project string, region string, packetMirroring string) *PacketMirroringsGetCall { + c := &PacketMirroringsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.packetMirroring = packetMirroring + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PacketMirroringsGetCall) Fields(s ...googleapi.Field) *PacketMirroringsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PacketMirroringsGetCall) IfNoneMatch(entityTag string) *PacketMirroringsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PacketMirroringsGetCall) Context(ctx context.Context) *PacketMirroringsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PacketMirroringsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/packetMirrorings/{packetMirroring}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.packetMirrorings.get" call. +// Exactly one of *PacketMirroring or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PacketMirroring.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirroring, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &PacketMirroring{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified PacketMirroring resource.", + // "httpMethod": "GET", + // "id": "compute.packetMirrorings.get", + // "parameterOrder": [ + // "project", + // "region", + // "packetMirroring" + // ], + // "parameters": { + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", - // "request": { - // "$ref": "RegionSetPolicyRequest" - // }, + // "path": "{project}/regions/{region}/packetMirrorings/{packetMirroring}", // "response": { - // "$ref": "Policy" + // "$ref": "PacketMirroring" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeTemplates.testIamPermissions": +// method id "compute.packetMirrorings.insert": -type NodeTemplatesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PacketMirroringsInsertCall struct { + s *Service + project string + region string + packetmirroring *PacketMirroring + 
urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) -func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { - c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a PacketMirroring resource in the specified project +// and region using the data included in the request. +func (r *PacketMirroringsService) Insert(project string, region string, packetmirroring *PacketMirroring) *PacketMirroringsInsertCall { + c := &PacketMirroringsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.packetmirroring = packetmirroring + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *PacketMirroringsInsertCall) RequestId(requestId string) *PacketMirroringsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { +func (c *PacketMirroringsInsertCall) Fields(s ...googleapi.Field) *PacketMirroringsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83961,36 +91814,36 @@ func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Node // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { +func (c *PacketMirroringsInsertCall) Context(ctx context.Context) *PacketMirroringsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *PacketMirroringsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/packetMirrorings") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -83998,21 +91851,20 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.packetMirrorings.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84031,7 +91883,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84043,13 +91895,12 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a PacketMirroring resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.testIamPermissions", + // "id": "compute.packetMirrorings.insert", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "region" // ], // "parameters": { // "project": { @@ -84060,52 +91911,51 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/packetMirrorings", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "PacketMirroring" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTypes.aggregatedList": +// method id "compute.packetMirrorings.list": -type NodeTypesAggregatedListCall struct { +type PacketMirroringsListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node types. (== -// suppress_warning http-rest-shadowed ==) -func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { - c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of PacketMirroring resources available to the +// specified project and region. +func (r *PacketMirroringsService) List(project string, region string) *PacketMirroringsListCall { + c := &PacketMirroringsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -84113,36 +91963,37 @@ func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedLi // filters resources listed in the response. 
The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *PacketMirroringsListCall) Filter(filter string) *PacketMirroringsListCall { c.urlParams_.Set("filter", filter) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *PacketMirroringsListCall) MaxResults(maxResults int64) *PacketMirroringsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -84152,22 +92003,23 @@ func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAgg // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { +func (c *PacketMirroringsListCall) OrderBy(orderBy string) *PacketMirroringsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { +func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirroringsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -84175,7 +92027,7 @@ func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { +func (c *PacketMirroringsListCall) Fields(s ...googleapi.Field) *PacketMirroringsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84185,7 +92037,7 @@ func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { +func (c *PacketMirroringsListCall) IfNoneMatch(entityTag string) *PacketMirroringsListCall { c.ifNoneMatch_ = entityTag return c } @@ -84193,23 +92045,23 @@ func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { +func (c *PacketMirroringsListCall) Context(ctx context.Context) *PacketMirroringsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTypesAggregatedListCall) Header() http.Header { +func (c *PacketMirroringsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84220,7 +92072,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/packetMirrorings") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -84229,18 +92081,19 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.aggregatedList" call. -// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any +// Do executes the "compute.packetMirrorings.list" call. +// Exactly one of *PacketMirroringList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was +// *PacketMirroringList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { +func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirroringList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84259,7 +92112,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTypeAggregatedList{ + ret := &PacketMirroringList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84271,33 +92124,34 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of node types. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of PacketMirroring resources available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.nodeTypes.aggregatedList", + // "id": "compute.packetMirrorings.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -84307,11 +92161,18 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeTypes", + // "path": "{project}/regions/{region}/packetMirrorings", // "response": { - // "$ref": "NodeTypeAggregatedList" + // "$ref": "PacketMirroringList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -84325,7 +92186,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { +func (c *PacketMirroringsListCall) Pages(ctx context.Context, f func(*PacketMirroringList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -84343,101 +92204,113 @@ func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTyp } } -// method id "compute.nodeTypes.get": +// method id "compute.packetMirrorings.patch": -type NodeTypesGetCall struct { - s *Service - project string - zone string - nodeType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PacketMirroringsPatchCall struct { + s *Service + project string + region string + packetMirroring string + packetmirroring *PacketMirroring + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. (== suppress_warning -// http-rest-shadowed ==) -func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { - c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified PacketMirroring resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. 
+func (r *PacketMirroringsService) Patch(project string, region string, packetMirroring string, packetmirroring *PacketMirroring) *PacketMirroringsPatchCall { + c := &PacketMirroringsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeType = nodeType + c.region = region + c.packetMirroring = packetMirroring + c.packetmirroring = packetmirroring + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroringsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { +func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirroringsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { +func (c *PacketMirroringsPatchCall) Context(ctx context.Context) *PacketMirroringsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTypesGetCall) Header() http.Header { +func (c *PacketMirroringsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/packetMirrorings/{packetMirroring}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeType": c.nodeType, + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.get" call. -// Exactly one of *NodeType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *NodeType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.packetMirrorings.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { +func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84456,7 +92329,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84468,17 +92341,17 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. Gets a list of available node types by making a list() request. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.nodeTypes.get", + // "description": "Patches the specified PacketMirroring resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.packetMirrorings.patch", // "parameterOrder": [ // "project", - // "zone", - // "nodeType" + // "region", + // "packetMirroring" // ], // "parameters": { - // "nodeType": { - // "description": "Name of the node type to return.", + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -84491,181 +92364,121 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", + // "path": "{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "request": { + // "$ref": "PacketMirroring" + // }, // "response": { - // "$ref": "NodeType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTypes.list": +// method id "compute.packetMirrorings.testIamPermissions": -type NodeTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PacketMirroringsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of node types available to the specified -// project. (== suppress_warning http-rest-shadowed ==) -func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { - c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *PacketMirroringsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *PacketMirroringsTestIamPermissionsCall { + c := &PacketMirroringsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { +func (c *PacketMirroringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *PacketMirroringsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { +func (c *PacketMirroringsTestIamPermissionsCall) Context(ctx context.Context) *PacketMirroringsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesListCall) Header() http.Header { +func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.list" call. -// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { +// Do executes the "compute.packetMirrorings.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84684,7 +92497,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTypeList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84696,37 +92509,15 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err } return ret, nil // { - // "description": "Retrieves a list of node types available to the specified project. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.nodeTypes.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.packetMirrorings.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -84734,17 +92525,27 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeTypes", + // "path": "{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "NodeTypeList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -84755,27 +92556,6 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "compute.projects.disableXpnHost": type ProjectsDisableXpnHostCall struct { @@ -84787,7 +92567,6 @@ type ProjectsDisableXpnHostCall struct { } // DisableXpnHost: Disable this project as a shared VPC host project. -// (== suppress_warning http-rest-shadowed ==) func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84840,7 +92619,7 @@ func (c *ProjectsDisableXpnHostCall) Header() http.Header { func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84899,7 +92678,7 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Disable this project as a shared VPC host project.", // "httpMethod": "POST", // "id": "compute.projects.disableXpnHost", // "parameterOrder": [ @@ -84943,8 +92722,7 @@ type ProjectsDisableXpnResourceCall struct { } // DisableXpnResource: Disable a service resource (also known as service -// project) associated with this host project. (== suppress_warning -// http-rest-shadowed ==) +// project) associated with this host project. func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84998,7 +92776,7 @@ func (c *ProjectsDisableXpnResourceCall) Header() http.Header { func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85062,7 +92840,7 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a service resource (also known as service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Disable a service resource (also known as service project) associated with this host project.", // "httpMethod": "POST", // "id": "compute.projects.disableXpnResource", // "parameterOrder": [ @@ -85107,8 +92885,7 @@ type ProjectsEnableXpnHostCall struct { header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. (== -// suppress_warning http-rest-shadowed ==) +// EnableXpnHost: Enable this project as a shared VPC host project. func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85161,7 +92938,7 @@ func (c *ProjectsEnableXpnHostCall) Header() http.Header { func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85220,7 +92997,7 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Enable this project as a shared VPC host project.", // "httpMethod": "POST", // "id": "compute.projects.enableXpnHost", // "parameterOrder": [ @@ -85265,8 +93042,7 @@ type ProjectsEnableXpnResourceCall struct { // EnableXpnResource: Enable service resource (a.k.a service project) // for a host project, so that subnets in the host project can be used -// by instances in the service project. (== suppress_warning -// http-rest-shadowed ==) +// by instances in the service project. 
func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85320,7 +93096,7 @@ func (c *ProjectsEnableXpnResourceCall) Header() http.Header { func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85384,7 +93160,7 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", // "httpMethod": "POST", // "id": "compute.projects.enableXpnResource", // "parameterOrder": [ @@ -85430,8 +93206,7 @@ type ProjectsGetCall struct { header_ http.Header } -// Get: Returns the specified Project resource. (== suppress_warning -// http-rest-shadowed ==) +// Get: Returns the specified Project resource. // For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get func (r *ProjectsService) Get(project string) *ProjectsGetCall { c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -85476,7 +93251,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85538,7 +93313,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified Project resource.", // "httpMethod": "GET", // "id": "compute.projects.get", // "parameterOrder": [ @@ -85578,8 +93353,7 @@ type ProjectsGetXpnHostCall struct { } // GetXpnHost: Gets the shared VPC host project that this project links -// to. May be empty if no link exists. (== suppress_warning -// http-rest-shadowed ==) +// to. May be empty if no link exists. func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85623,7 +93397,7 @@ func (c *ProjectsGetXpnHostCall) Header() http.Header { func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85685,7 +93459,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Gets the shared VPC host project that this project links to. 
May be empty if no link exists. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", // "httpMethod": "GET", // "id": "compute.projects.getXpnHost", // "parameterOrder": [ @@ -85724,8 +93498,7 @@ type ProjectsGetXpnResourcesCall struct { } // GetXpnResources: Gets service resources (a.k.a service project) -// associated with this host project. (== suppress_warning -// http-rest-shadowed ==) +// associated with this host project. func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85736,24 +93509,25 @@ func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourc // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("filter", filter) return c @@ -85761,34 +93535,35 @@ func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResou // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("order_by", orderBy) + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("pageToken", pageToken) @@ -85832,7 +93607,7 @@ func (c *ProjectsGetXpnResourcesCall) Header() http.Header { func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85894,7 +93669,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Gets service resources (a.k.a service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets service resources (a.k.a service project) associated with this host project.", // "httpMethod": "GET", // "id": "compute.projects.getXpnResources", // "parameterOrder": [ @@ -85902,25 +93677,25 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -85977,7 +93752,7 @@ type ProjectsListXpnHostsCall struct { } // ListXpnHosts: Lists all shared VPC host projects visible to the user -// in an organization. (== suppress_warning http-rest-shadowed ==) +// in an organization. func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85989,24 +93764,25 @@ func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsreque // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { c.urlParams_.Set("filter", filter) return c @@ -86014,34 +93790,35 @@ func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCa // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("order_by", orderBy) + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { c.urlParams_.Set("pageToken", pageToken) @@ -86075,7 +93852,7 @@ func (c *ProjectsListXpnHostsCall) Header() http.Header { func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86139,7 +93916,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "Lists all shared VPC host projects visible to the user in an organization. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists all shared VPC host projects visible to the user in an organization.", // "httpMethod": "POST", // "id": "compute.projects.listXpnHosts", // "parameterOrder": [ @@ -86147,25 +93924,25 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -86224,8 +94001,7 @@ type ProjectsMoveDiskCall struct { header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. (== -// suppress_warning http-rest-shadowed ==) +// MoveDisk: Moves a persistent disk from one zone to another. func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86279,7 +94055,7 @@ func (c *ProjectsMoveDiskCall) Header() http.Header { func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86343,7 +94119,7 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another. (== suppress_warning http-rest-shadowed ==)", + // "description": "Moves a persistent disk from one zone to another.", // "httpMethod": "POST", // "id": "compute.projects.moveDisk", // "parameterOrder": [ @@ -86390,7 +94166,7 @@ type ProjectsMoveInstanceCall struct { } // MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. (== suppress_warning http-rest-shadowed ==) +// from one zone to another. func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86444,7 +94220,7 @@ func (c *ProjectsMoveInstanceCall) Header() http.Header { func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86508,7 +94284,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Moves an instance and its attached persistent disks from one zone to another.", // "httpMethod": "POST", // "id": "compute.projects.moveInstance", // "parameterOrder": [ @@ -86556,7 +94332,6 @@ type ProjectsSetCommonInstanceMetadataCall struct { // SetCommonInstanceMetadata: Sets metadata common to all instances // within the specified project using the data included in the request. -// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86611,7 +94386,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86675,7 +94450,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ @@ -86724,7 +94499,7 @@ type ProjectsSetDefaultNetworkTierCall struct { // SetDefaultNetworkTier: Sets the default network tier of the project. // The default network tier is used when an // address/forwardingRule/instance is created without specifying the -// network tier field. (== suppress_warning http-rest-shadowed ==) +// network tier field. func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86778,7 +94553,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86842,7 +94617,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the default network tier of the project. 
The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", // "httpMethod": "POST", // "id": "compute.projects.setDefaultNetworkTier", // "parameterOrder": [ @@ -86891,7 +94666,7 @@ type ProjectsSetUsageExportBucketCall struct { // SetUsageExportBucket: Enables the usage export feature and sets the // usage export bucket where reports are stored. If you provide an empty // request body using this method, the usage export feature will be -// disabled. (== suppress_warning http-rest-shadowed ==) +// disabled. // For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86946,7 +94721,7 @@ func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87010,7 +94785,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled. (== suppress_warning http-rest-shadowed ==)", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", // "httpMethod": "POST", // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ @@ -87060,8 +94835,7 @@ type RegionAutoscalersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified autoscaler. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified autoscaler. func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87116,7 +94890,7 @@ func (c *RegionAutoscalersDeleteCall) Header() http.Header { func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87177,7 +94951,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified autoscaler.", // "httpMethod": "DELETE", // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ @@ -87238,8 +95012,7 @@ type RegionAutoscalersGetCall struct { header_ http.Header } -// Get: Returns the specified autoscaler. (== suppress_warning -// http-rest-shadowed ==) +// Get: Returns the specified autoscaler. 
func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87285,7 +95058,7 @@ func (c *RegionAutoscalersGetCall) Header() http.Header { func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87349,7 +95122,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified autoscaler.", // "httpMethod": "GET", // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ @@ -87406,7 +95179,7 @@ type RegionAutoscalersInsertCall struct { } // Insert: Creates an autoscaler in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) +// included in the request. func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87461,7 +95234,7 @@ func (c *RegionAutoscalersInsertCall) Header() http.Header { func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87526,7 +95299,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ @@ -87582,7 +95355,7 @@ type RegionAutoscalersListCall struct { } // List: Retrieves a list of autoscalers contained within the specified -// region. (== suppress_warning http-rest-shadowed ==) +// region. func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87594,24 +95367,25 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. 
For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -87619,10 +95393,10 @@ func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersList // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -87633,12 +95407,13 @@ func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscal // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { c.urlParams_.Set("orderBy", orderBy) @@ -87646,7 +95421,7 @@ func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersLi } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { c.urlParams_.Set("pageToken", pageToken) @@ -87690,7 +95465,7 @@ func (c *RegionAutoscalersListCall) Header() http.Header { func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87753,7 +95528,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of autoscalers contained within the specified region.", // "httpMethod": "GET", // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ @@ -87762,25 +95537,25 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -87847,8 +95622,7 @@ type RegionAutoscalersPatchCall struct { // Patch: Updates an autoscaler in the specified project using the data // included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. (== -// suppress_warning http-rest-shadowed ==) +// uses the JSON merge patch format and processing rules. 
func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87910,7 +95684,7 @@ func (c *RegionAutoscalersPatchCall) Header() http.Header { func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87975,7 +95749,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ @@ -88037,7 +95811,7 @@ type RegionAutoscalersUpdateCall struct { } // Update: Updates an autoscaler in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) +// included in the request. func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88099,7 +95873,7 @@ func (c *RegionAutoscalersUpdateCall) Header() http.Header { func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88164,7 +95938,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", // "httpMethod": "PUT", // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ @@ -88225,8 +95999,7 @@ type RegionBackendServicesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified regional BackendService resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified regional BackendService resource. 
func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88281,7 +96054,7 @@ func (c *RegionBackendServicesDeleteCall) Header() http.Header { func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88342,7 +96115,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified regional BackendService resource.", // "httpMethod": "DELETE", // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ @@ -88403,8 +96176,7 @@ type RegionBackendServicesGetCall struct { header_ http.Header } -// Get: Returns the specified regional BackendService resource. (== -// suppress_warning http-rest-shadowed ==) +// Get: Returns the specified regional BackendService resource. func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88450,7 +96222,7 @@ func (c *RegionBackendServicesGetCall) Header() http.Header { func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88514,7 +96286,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Returns the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified regional BackendService resource.", // "httpMethod": "GET", // "id": "compute.regionBackendServices.get", // "parameterOrder": [ @@ -88572,7 +96344,7 @@ type RegionBackendServicesGetHealthCall struct { } // GetHealth: Gets the most recent health check results for this -// regional BackendService. (== suppress_warning http-rest-shadowed ==) +// regional BackendService. 
func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88609,7 +96381,7 @@ func (c *RegionBackendServicesGetHealthCall) Header() http.Header { func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88675,7 +96447,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the most recent health check results for this regional BackendService. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the most recent health check results for this regional BackendService.", // "httpMethod": "POST", // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ @@ -88736,8 +96508,8 @@ type RegionBackendServicesInsertCall struct { // Insert: Creates a regional BackendService resource in the specified // project using the data included in the request. There are several // restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Restrictions and Guidelines for more -// information. (== suppress_warning http-rest-shadowed ==) +// backend service. Read Understanding backend services for more +// information. func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88792,7 +96564,7 @@ func (c *RegionBackendServicesInsertCall) Header() http.Header { func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88857,7 +96629,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", // "httpMethod": "POST", // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ @@ -88913,8 +96685,7 @@ type RegionBackendServicesListCall struct { } // List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. (== -// suppress_warning http-rest-shadowed ==) +// available to the specified project in the given region. 
func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88926,24 +96697,25 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -88951,10 +96723,10 @@ func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServ // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -88965,12 +96737,13 @@ func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBack // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -88978,7 +96751,7 @@ func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendSe } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -89022,7 +96795,7 @@ func (c *RegionBackendServicesListCall) Header() http.Header { func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89085,7 +96858,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } return ret, nil // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", // "httpMethod": "GET", // "id": "compute.regionBackendServices.list", // "parameterOrder": [ @@ -89094,25 +96867,25 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
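Reviewer note: the filter, maxResults, orderBy and pageToken parameters documented in this hunk are normally combined through the fluent setters plus the generated Pages helper, which follows nextPageToken for the caller. A sketch, assuming svc and ctx as in the earlier example; the filter expression just mirrors the doc-comment example and the project/region values are placeholders.

func listRegionBackendServices(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionBackendServices.List("my-project", "us-central1").
		Filter(`name != example-instance`).  // same shape as the doc-comment example
		OrderBy("creationTimestamp desc").   // newest first; only name / creationTimestamp desc are supported
		MaxResults(100)                      // per page, within the documented 0..500 range

	// Pages re-issues the request with the returned nextPageToken until the
	// listing is exhausted, invoking the callback once per page.
	return call.Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bs := range page.Items {
			fmt.Println(bs.Name)
		}
		return nil
	})
}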
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -89179,11 +96952,11 @@ type RegionBackendServicesPatchCall struct { } // Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. (== suppress_warning http-rest-shadowed ==) +// the data included in the request. There are several Understanding +// backend services to keep in mind when updating a backend service. +// Read Understanding backend services for more information. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89239,7 +97012,7 @@ func (c *RegionBackendServicesPatchCall) Header() http.Header { func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89305,7 +97078,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ @@ -89370,10 +97143,9 @@ type RegionBackendServicesUpdateCall struct { } // Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. (== -// suppress_warning http-rest-shadowed ==) +// the data included in the request. There are several Understanding +// backend services to keep in mind when updating a backend service. +// Read Understanding backend services for more information. 
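Reviewer note: because Patch uses JSON merge-patch semantics, callers usually populate only the fields they want changed; with the generated structs, zero-valued fields are omitted from the request body unless they are listed in ForceSendFields. A sketch under the same placeholder names as above; TimeoutSec is only mentioned as an illustrative field.

func patchRegionBackendService(ctx context.Context, svc *compute.Service) error {
	patch := &compute.BackendService{
		Description: "updated description",
		// To send a field at its zero value (for example to clear TimeoutSec),
		// name it explicitly, otherwise the JSON encoder drops it:
		// ForceSendFields: []string{"TimeoutSec"},
	}
	op, err := svc.RegionBackendServices.Patch("my-project", "us-central1", "my-backend-service", patch).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// Update (PUT) has the same call shape but replaces the whole resource.
	fmt.Println("operation:", op.Name, op.Status)
	return nil
}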
func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89429,7 +97201,7 @@ func (c *RegionBackendServicesUpdateCall) Header() http.Header { func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89495,7 +97267,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", // "httpMethod": "PUT", // "id": "compute.regionBackendServices.update", // "parameterOrder": [ @@ -89557,8 +97329,7 @@ type RegionCommitmentsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of commitments. func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89569,35 +97340,49 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *RegionCommitmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -89608,12 +97393,13 @@ func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *Regi // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -89621,7 +97407,7 @@ func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCom } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -89665,7 +97451,7 @@ func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89727,7 +97513,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of commitments.", // "httpMethod": "GET", // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ @@ -89735,25 +97521,30 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
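Reviewer note: the IncludeAllScopes parameter added in this revision plugs into the aggregated-list call like any other optional setter. A sketch, again assuming svc and ctx from the first example; the Items map layout and the CommitmentsScopedList/Commitment field names are taken from the generated types and worth double-checking against the vendored file.

func listAllCommitments(ctx context.Context, svc *compute.Service) error {
	agg, err := svc.RegionCommitments.AggregatedList("my-project").
		IncludeAllScopes(true). // include every visible scope, not only those where commitments can exist
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items { // keyed by scope, e.g. "regions/us-central1"
		for _, c := range scoped.Commitments {
			fmt.Println(scope, c.Name, c.Plan, c.Status)
		}
	}
	return nil
}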
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -89813,8 +97604,7 @@ type RegionCommitmentsGetCall struct { } // Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available commitments by making a list() request. 
func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89860,7 +97650,7 @@ func (c *RegionCommitmentsGetCall) Header() http.Header { func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89924,7 +97714,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment } return ret, nil // { - // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionCommitments.get", // "parameterOrder": [ @@ -89981,7 +97771,7 @@ type RegionCommitmentsInsertCall struct { } // Insert: Creates a commitment in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) +// included in the request. func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90036,7 +97826,7 @@ func (c *RegionCommitmentsInsertCall) Header() http.Header { func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90101,7 +97891,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a commitment in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionCommitments.insert", // "parameterOrder": [ @@ -90157,7 +97947,7 @@ type RegionCommitmentsListCall struct { } // List: Retrieves a list of commitments contained within the specified -// region. (== suppress_warning http-rest-shadowed ==) +// region. func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90169,24 +97959,25 @@ func (r *RegionCommitmentsService) List(project string, region string) *RegionCo // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. 
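Reviewer note: Insert returns a region-scoped Operation rather than the created resource, so callers normally poll it (or use RegionOperations.Wait where available) before reading the commitment back with Get. A sketch assuming fmt and time are imported alongside the packages above; the Commitment payload (Plan, Resources with Type/Amount) is purely illustrative and commitments are billable, so treat every value as a placeholder.

func createCommitment(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionCommitments.Insert("my-project", "us-central1", &compute.Commitment{
		Name: "example-commitment",
		Plan: "TWELVE_MONTH",
		Resources: []*compute.ResourceCommitment{
			{Type: "VCPU", Amount: 4},
			{Type: "MEMORY", Amount: 16 * 1024}, // assumed to be expressed in MB
		},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}

	// Poll the returned Operation until it reports DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.RegionOperations.Get("my-project", "us-central1", op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("commitment insert failed: %+v", op.Error.Errors)
	}
	return nil
}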
+// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -90194,10 +97985,10 @@ func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsList // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -90208,12 +97999,13 @@ func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitme // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -90221,7 +98013,7 @@ func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsLi } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -90265,7 +98057,7 @@ func (c *RegionCommitmentsListCall) Header() http.Header { func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90328,7 +98120,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } return ret, nil // { - // "description": "Retrieves a list of commitments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of commitments contained within the specified region.", // "httpMethod": "GET", // "id": "compute.regionCommitments.list", // "parameterOrder": [ @@ -90337,25 +98129,25 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -90422,8 +98214,7 @@ type RegionDiskTypesGetCall struct { } // Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// available disk types by making a list() request. 
func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90469,7 +98260,7 @@ func (c *RegionDiskTypesGetCall) Header() http.Header { func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90533,7 +98324,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionDiskTypes.get", // "parameterOrder": [ @@ -90590,7 +98381,7 @@ type RegionDiskTypesListCall struct { } // List: Retrieves a list of regional disk types available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90602,24 +98393,25 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -90627,10 +98419,10 @@ func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -90641,12 +98433,13 @@ func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -90654,7 +98447,7 @@ func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCa } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -90698,7 +98491,7 @@ func (c *RegionDiskTypesListCall) Header() http.Header { func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90761,7 +98554,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT } return ret, nil // { - // "description": "Retrieves a list of regional disk types available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of regional disk types available to the specified project.", // "httpMethod": "GET", // "id": "compute.regionDiskTypes.list", // "parameterOrder": [ @@ -90770,25 +98563,25 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -90856,8 +98649,7 @@ type RegionDisksAddResourcePoliciesCall struct { // AddResourcePolicies: Adds existing resource policies to a regional // disk. You can only add one policy which will be applied to this disk -// for scheduling snapshot creation. (== suppress_warning -// http-rest-shadowed ==) +// for scheduling snapshot creation. func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90913,7 +98705,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90979,7 +98771,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", + // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", // "httpMethod": "POST", // "id": "compute.regionDisks.addResourcePolicies", // "parameterOrder": [ @@ -91043,8 +98835,7 @@ type RegionDisksCreateSnapshotCall struct { header_ http.Header } -// CreateSnapshot: Creates a snapshot of this regional disk. 
(== -// suppress_warning http-rest-shadowed ==) +// CreateSnapshot: Creates a snapshot of this regional disk. func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91100,7 +98891,7 @@ func (c *RegionDisksCreateSnapshotCall) Header() http.Header { func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91166,7 +98957,7 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of this regional disk. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a snapshot of this regional disk.", // "httpMethod": "POST", // "id": "compute.regionDisks.createSnapshot", // "parameterOrder": [ @@ -91233,7 +99024,6 @@ type RegionDisksDeleteCall struct { // regional disk removes all the replicas of its data permanently and is // irreversible. However, deleting a disk does not delete any snapshots // previously made from the disk. You must separately delete snapshots. -// (== suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91288,7 +99078,7 @@ func (c *RegionDisksDeleteCall) Header() http.Header { func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91349,7 +99139,7 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", // "httpMethod": "DELETE", // "id": "compute.regionDisks.delete", // "parameterOrder": [ @@ -91409,8 +99199,7 @@ type RegionDisksGetCall struct { header_ http.Header } -// Get: Returns a specified regional persistent disk. (== -// suppress_warning http-rest-shadowed ==) +// Get: Returns a specified regional persistent disk. 
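Reviewer note: CreateSnapshot on a regional disk follows the same pattern; for the simple case the snapshot body only needs a name, and the returned Operation can be polled exactly as in the commitment sketch above. Placeholder project, region, disk and snapshot names again.

func snapshotRegionalDisk(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionDisks.CreateSnapshot("my-project", "us-central1", "my-regional-disk",
		&compute.Snapshot{Name: "my-regional-disk-snap-001"}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("snapshot operation started:", op.Name)
	// As the Delete description in this hunk notes, deleting the disk later
	// does not delete this snapshot; snapshots must be removed separately.
	return nil
}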
func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91456,7 +99245,7 @@ func (c *RegionDisksGetCall) Header() http.Header { func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91520,7 +99309,7 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns a specified regional persistent disk.", // "httpMethod": "GET", // "id": "compute.regionDisks.get", // "parameterOrder": [ @@ -91577,8 +99366,7 @@ type RegionDisksInsertCall struct { } // Insert: Creates a persistent regional disk in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91640,7 +99428,7 @@ func (c *RegionDisksInsertCall) Header() http.Header { func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91705,7 +99493,7 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a persistent regional disk in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionDisks.insert", // "parameterOrder": [ @@ -91766,7 +99554,7 @@ type RegionDisksListCall struct { } // List: Retrieves the list of persistent disks contained within the -// specified region. (== suppress_warning http-rest-shadowed ==) +// specified region. func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91778,24 +99566,25 @@ func (r *RegionDisksService) List(project string, region string) *RegionDisksLis // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. 
For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { c.urlParams_.Set("filter", filter) return c @@ -91803,10 +99592,10 @@ func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -91817,12 +99606,13 @@ func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { c.urlParams_.Set("orderBy", orderBy) @@ -91830,7 +99620,7 @@ func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { c.urlParams_.Set("pageToken", pageToken) @@ -91874,7 +99664,7 @@ func (c *RegionDisksListCall) Header() http.Header { func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91937,7 +99727,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error } return ret, nil // { - // "description": "Retrieves the list of persistent disks contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of persistent disks contained within the specified region.", // "httpMethod": "GET", // "id": "compute.regionDisks.list", // "parameterOrder": [ @@ -91946,25 +99736,25 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -92031,7 +99821,7 @@ type RegionDisksRemoveResourcePoliciesCall struct { } // RemoveResourcePolicies: Removes resource policies from a regional -// disk. (== suppress_warning http-rest-shadowed ==) +// disk. 
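// Illustrative sketch (not part of the generated diff): listing regional disks with
// the filter/maxResults/pageToken parameters documented above, following the
// nextPageToken loop those comments describe. Assumes "context" and
// compute "google.golang.org/api/compute/v1"; the filter value is a placeholder.
func listRegionDisks(ctx context.Context, svc *compute.Service, project, region string) ([]*compute.Disk, error) {
	var disks []*compute.Disk
	pageToken := ""
	for {
		call := svc.RegionDisks.List(project, region).
			Filter(`name != example-disk`). // same syntax as the filter examples above
			MaxResults(100)
		if pageToken != "" {
			call.PageToken(pageToken)
		}
		page, err := call.Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		disks = append(disks, page.Items...)
		if page.NextPageToken == "" {
			return disks, nil
		}
		pageToken = page.NextPageToken
	}
}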
func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92087,7 +99877,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92153,7 +99943,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Removes resource policies from a regional disk. (== suppress_warning http-rest-shadowed ==)", + // "description": "Removes resource policies from a regional disk.", // "httpMethod": "POST", // "id": "compute.regionDisks.removeResourcePolicies", // "parameterOrder": [ @@ -92217,8 +100007,7 @@ type RegionDisksResizeCall struct { header_ http.Header } -// Resize: Resizes the specified regional persistent disk. (== -// suppress_warning http-rest-shadowed ==) +// Resize: Resizes the specified regional persistent disk. func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92274,7 +100063,7 @@ func (c *RegionDisksResizeCall) Header() http.Header { func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92340,7 +100129,7 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", + // "description": "Resizes the specified regional persistent disk.", // "httpMethod": "POST", // "id": "compute.regionDisks.resize", // "parameterOrder": [ @@ -92404,8 +100193,7 @@ type RegionDisksSetLabelsCall struct { header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. (== -// suppress_warning http-rest-shadowed ==) +// SetLabels: Sets the labels on the target regional disk. 
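// Illustrative sketch (not part of the generated diff): growing a regional disk with
// Resize and then re-labelling it with SetLabels. SetLabels needs the disk's current
// labelFingerprint, so the disk is re-read first. The size and label values are
// placeholders.
func resizeAndLabelDisk(ctx context.Context, svc *compute.Service, project, region, disk string) error {
	if _, err := svc.RegionDisks.Resize(project, region, disk,
		&compute.RegionDisksResizeRequest{SizeGb: 500}).Context(ctx).Do(); err != nil {
		return err
	}
	d, err := svc.RegionDisks.Get(project, region, disk).Context(ctx).Do()
	if err != nil {
		return err
	}
	_, err = svc.RegionDisks.SetLabels(project, region, disk, &compute.RegionSetLabelsRequest{
		Labels:           map[string]string{"team": "storage"},
		LabelFingerprint: d.LabelFingerprint,
	}).Context(ctx).Do()
	return err
}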
func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92461,7 +100249,7 @@ func (c *RegionDisksSetLabelsCall) Header() http.Header { func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92527,7 +100315,7 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the labels on the target regional disk.", // "httpMethod": "POST", // "id": "compute.regionDisks.setLabels", // "parameterOrder": [ @@ -92592,7 +100380,7 @@ type RegionDisksTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) +// specified resource. func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92629,7 +100417,7 @@ func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92695,7 +100483,7 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", // "id": "compute.regionDisks.testIamPermissions", // "parameterOrder": [ @@ -92754,8 +100542,7 @@ type RegionHealthChecksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified HealthCheck resource. func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92810,7 +100597,7 @@ func (c *RegionHealthChecksDeleteCall) Header() http.Header { func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92871,7 +100658,7 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource. 
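// Illustrative sketch (not part of the generated diff): checking which disk
// permissions the caller holds via TestIamPermissions. The permission names below
// are examples of real Compute disk IAM permissions, used here as placeholders.
func checkDiskPermissions(ctx context.Context, svc *compute.Service, project, region, disk string) ([]string, error) {
	resp, err := svc.RegionDisks.TestIamPermissions(project, region, disk, &compute.TestPermissionsRequest{
		Permissions: []string{"compute.disks.get", "compute.disks.delete"},
	}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the subset of requested permissions the caller actually has
}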
(== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified HealthCheck resource.", // "httpMethod": "DELETE", // "id": "compute.regionHealthChecks.delete", // "parameterOrder": [ @@ -92933,8 +100720,7 @@ type RegionHealthChecksGetCall struct { } // Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available health checks by making a list() request. func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92980,7 +100766,7 @@ func (c *RegionHealthChecksGetCall) Header() http.Header { func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93044,7 +100830,7 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionHealthChecks.get", // "parameterOrder": [ @@ -93101,8 +100887,7 @@ type RegionHealthChecksInsertCall struct { } // Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// the data included in the request. func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93157,7 +100942,7 @@ func (c *RegionHealthChecksInsertCall) Header() http.Header { func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93222,7 +101007,7 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionHealthChecks.insert", // "parameterOrder": [ @@ -93278,7 +101063,7 @@ type RegionHealthChecksListCall struct { } // List: Retrieves the list of HealthCheck resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. 
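// Illustrative sketch (not part of the generated diff): creating a regional TCP
// health check with Insert and reading it back with Get. Field values are
// placeholders; Insert returns an Operation, so the resource may not be usable
// until that operation completes.
func createRegionHealthCheck(ctx context.Context, svc *compute.Service, project, region string) (*compute.HealthCheck, error) {
	hc := &compute.HealthCheck{
		Name:             "example-hc",
		Type:             "TCP",
		TcpHealthCheck:   &compute.TCPHealthCheck{Port: 80},
		CheckIntervalSec: 5,
		TimeoutSec:       5,
	}
	if _, err := svc.RegionHealthChecks.Insert(project, region, hc).Context(ctx).Do(); err != nil {
		return nil, err
	}
	return svc.RegionHealthChecks.Get(project, region, hc.Name).Context(ctx).Do()
}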
func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93290,24 +101075,25 @@ func (r *RegionHealthChecksService) List(project string, region string) *RegionH // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -93315,10 +101101,10 @@ func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksLi // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -93329,12 +101115,13 @@ func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthC // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) @@ -93342,7 +101129,7 @@ func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecks } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) @@ -93386,7 +101173,7 @@ func (c *RegionHealthChecksListCall) Header() http.Header { func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93449,7 +101236,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.regionHealthChecks.list", // "parameterOrder": [ @@ -93458,25 +101245,25 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -93545,7 +101332,6 @@ type RegionHealthChecksPatchCall struct { // Patch: Updates a HealthCheck resource in the specified project using // the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. -// (== suppress_warning http-rest-shadowed ==) func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93601,7 +101387,7 @@ func (c *RegionHealthChecksPatchCall) Header() http.Header { func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93667,7 +101453,7 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionHealthChecks.patch", // "parameterOrder": [ @@ -93732,8 +101518,7 @@ type RegionHealthChecksUpdateCall struct { } // Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// the data included in the request. func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93789,7 +101574,7 @@ func (c *RegionHealthChecksUpdateCall) Header() http.Header { func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93855,19 +101640,386 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "PUT", - // "id": "compute.regionHealthChecks.update", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionHealthChecks.update", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.abandonInstances": + +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AbandonInstances: Flags the specified instances to be immediately +// removed from the managed instance group. Abandoning an instance does +// not delete the instance, but it does remove the instance from any +// target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. 
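// Illustrative sketch (not part of the generated diff): a PATCH that changes only
// checkIntervalSec, sent with a client-generated request ID so that a retried
// request is ignored by the server, as the requestId documentation above describes.
// Assumes the github.com/google/uuid package for the UUID; RequestId is the
// generated setter for the requestId query parameter.
func patchHealthCheckInterval(ctx context.Context, svc *compute.Service, project, region, name string) error {
	patch := &compute.HealthCheck{CheckIntervalSec: 10} // zero-valued fields are omitted from the PATCH body
	_, err := svc.RegionHealthChecks.Patch(project, region, name, patch).
		RequestId(uuid.New().String()).
		Context(ctx).Do()
	return err
}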
+// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": + +type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ApplyUpdatesToInstances: Apply updates to selected instances the +// managed instance group. +func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
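// Illustrative sketch (not part of the generated diff): abandoning one instance from
// a regional managed instance group. The instance is referenced by URL (placeholder
// below); as the method comment notes, the returned Operation is only "scheduled",
// so the abandon status is verified separately via listManagedInstances.
func abandonInstance(ctx context.Context, svc *compute.Service, project, region, igm, instanceURL string) (*compute.Operation, error) {
	req := &compute.RegionInstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{instanceURL}, // e.g. "zones/<zone>/instances/<name>"
	}
	return svc.RegionInstanceGroupManagers.AbandonInstances(project, region, igm, req).Context(ctx).Do()
}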
+func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Apply updates to selected instances the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -93879,21 +102031,15 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
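// Illustrative sketch (not part of the generated diff): asking the regional MIG to
// re-apply its current configuration to a specific instance via
// ApplyUpdatesToInstances. MinimalAction and the instance URL are placeholders;
// "RESTART" is one of the API's update action names.
func applyUpdatesToInstance(ctx context.Context, svc *compute.Service, project, region, igm, instanceURL string) (*compute.Operation, error) {
	req := &compute.RegionInstanceGroupManagersApplyUpdatesRequest{
		Instances:     []string{instanceURL},
		MinimalAction: "RESTART",
	}
	return svc.RegionInstanceGroupManagers.ApplyUpdatesToInstances(project, region, igm, req).Context(ctx).Do()
}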
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", // "request": { - // "$ref": "HealthCheck" + // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" // }, // "response": { // "$ref": "Operation" @@ -93906,42 +102052,31 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionInstanceGroupManagers.createInstances": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be immediately -// removed from the managed instance group. Abandoning an instance does -// not delete the instance, but it does remove the instance from any -// target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. (== suppress_warning http-rest-shadowed ==) -func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateInstances: Creates instances with per-instance configs in this +// regional managed instance group. Instances are created using the +// current instance template. The create instances operation is marked +// DONE if the createInstances request is successful. The underlying +// actions take additional time. You must separately verify the status +// of the creating or actions with the listmanagedinstances method. 
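// Illustrative sketch (not part of the generated diff): creating two named instances
// from the MIG's current instance template, per the CreateInstances comment above.
// The per-instance names are placeholders.
func createNamedInstances(ctx context.Context, svc *compute.Service, project, region, igm string) (*compute.Operation, error) {
	req := &compute.RegionInstanceGroupManagersCreateInstancesRequest{
		Instances: []*compute.PerInstanceConfig{
			{Name: "example-vm-1"},
			{Name: "example-vm-2"},
		},
	}
	return svc.RegionInstanceGroupManagers.CreateInstances(project, region, igm, req).Context(ctx).Do()
}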
+func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { + c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + c.regioninstancegroupmanagerscreateinstancesrequest = regioninstancegroupmanagerscreateinstancesrequest return c } @@ -93954,12 +102089,11 @@ func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, re // and the request times out. If you make the request again with the // same request ID, the server can check if original operation with the // same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// request. // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -93967,7 +102101,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93975,36 +102109,36 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersCreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerscreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -94019,14 +102153,14 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.createInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94057,9 +102191,9 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "id": "compute.regionInstanceGroupManagers.createInstances", // "parameterOrder": [ // "project", // "region", @@ -94067,7 +102201,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -94080,20 +102214,20 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -94119,7 +102253,7 @@ type RegionInstanceGroupManagersDeleteCall struct { } // Delete: Deletes the specified managed instance group and all of the -// instances in that group. (== suppress_warning http-rest-shadowed ==) +// instances in that group. func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94174,7 +102308,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94235,7 +102369,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified managed instance group and all of the instances in that group.", // "httpMethod": "DELETE", // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ @@ -94310,7 +102444,7 @@ type RegionInstanceGroupManagersDeleteInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. (== suppress_warning http-rest-shadowed ==) +// request. func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94366,7 +102500,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94432,7 +102566,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately deleted. 
The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ @@ -94495,7 +102629,7 @@ type RegionInstanceGroupManagersGetCall struct { } // Get: Returns all of the details about the specified managed instance -// group. (== suppress_warning http-rest-shadowed ==) +// group. func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94541,7 +102675,7 @@ func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94605,7 +102739,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns all of the details about the specified managed instance group.", // "httpMethod": "GET", // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ @@ -94668,7 +102802,6 @@ type RegionInstanceGroupManagersInsertCall struct { // listmanagedinstances method. // // A regional managed instance group can contain up to 2000 instances. 
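(Aside, not part of the patch: a sketch of creating a regional managed instance group with the Insert call documented above, under the same assumptions as the earlier sketch; the instance-template path is a hypothetical placeholder. Imports assumed: "context", "log", and compute "google.golang.org/api/compute/v1".)

    // createRegionalMIG sketches the Insert call documented above.
    func createRegionalMIG(ctx context.Context, svc *compute.Service) error {
        igm := &compute.InstanceGroupManager{
            Name:             "my-igm",
            BaseInstanceName: "my-instance",
            InstanceTemplate: "global/instanceTemplates/my-template", // hypothetical template path
            TargetSize:       3,
        }
        op, err := svc.RegionInstanceGroupManagers.Insert("my-project", "us-central1", igm).Context(ctx).Do()
        if err != nil {
            return err
        }
        // Insert is DONE once the group exists; instance creation is verified
        // separately via listmanagedinstances, as the description above notes.
        log.Printf("insert operation %s: %s", op.Name, op.Status)
        return nil
    }
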
-// (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94723,7 +102856,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94788,7 +102921,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ @@ -94843,8 +102976,7 @@ type RegionInstanceGroupManagersListCall struct { } // List: Retrieves the list of managed instance groups that are -// contained within the specified region. (== suppress_warning -// http-rest-shadowed ==) +// contained within the specified region. func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94856,24 +102988,25 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. 
+// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -94881,10 +103014,10 @@ func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInsta // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -94895,12 +103028,13 @@ func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *Regi // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) @@ -94908,7 +103042,7 @@ func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionIns } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
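(Aside, not part of the patch: a sketch of the list options documented above (filter, maxResults, orderBy), using the Pages helper so pageToken handling stays implicit. Same assumptions as the earlier sketches; the filter expression is only an example.)

    // listRegionalMIGs sketches the List call with the options described above.
    func listRegionalMIGs(ctx context.Context, svc *compute.Service) error {
        call := svc.RegionInstanceGroupManagers.List("my-project", "us-central1").
            Filter(`name != example-instance`).
            OrderBy("creationTimestamp desc").
            MaxResults(100)
        // Pages follows nextPageToken and invokes the callback once per page.
        return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
            for _, igm := range page.Items {
                log.Printf("%s targetSize=%d", igm.Name, igm.TargetSize)
            }
            return nil
        })
    }
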
func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) @@ -94952,7 +103086,7 @@ func (c *RegionInstanceGroupManagersListCall) Header() http.Header { func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95015,7 +103149,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", // "httpMethod": "GET", // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ @@ -95024,25 +103158,25 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -95094,6 +103228,283 @@ func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func( } } +// method id "compute.regionInstanceGroupManagers.listErrors": + +type RegionInstanceGroupManagersListErrorsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListErrors: Lists all errors thrown by actions on instances for a +// given regional managed instance group. +func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { + c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionInstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
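(Aside, not part of the patch: a sketch of the newly added listErrors call, paging through a group's per-instance errors. Same assumptions as above; only the Items and paging fields of the generated response type are relied on.)

    // listMIGErrors sketches the new ListErrors call and its Pages iterator.
    func listMIGErrors(ctx context.Context, svc *compute.Service) error {
        call := svc.RegionInstanceGroupManagers.ListErrors("my-project", "us-central1", "my-igm")
        return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagersListErrorsResponse) error {
            for _, e := range page.Items {
                log.Printf("instance error: %+v", e)
            }
            return nil
        })
    }
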
+func (c *RegionInstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListErrorsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersListErrorsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListErrorsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.listErrors" call. +// Exactly one of *RegionInstanceGroupManagersListErrorsResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListErrorsResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListErrorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionInstanceGroupManagersListErrorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.listErrors", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request. This should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + // "response": { + // "$ref": "RegionInstanceGroupManagersListErrorsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListErrorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionInstanceGroupManagers.listManagedInstances": type RegionInstanceGroupManagersListManagedInstancesCall struct { @@ -95109,7 +103520,7 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // ListManagedInstances: Lists the instances in the managed instance // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its -// instances. (== suppress_warning http-rest-shadowed ==) +// instances. func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95122,24 +103533,25 @@ func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. 
For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -95147,34 +103559,35 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter stri // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("pageToken", pageToken) @@ -95208,7 +103621,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Head func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95271,7 +103684,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ @@ -95281,7 +103694,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, @@ -95293,19 +103706,19 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -95336,6 +103749,27 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionInstanceGroupManagers.patch": type RegionInstanceGroupManagersPatchCall struct { @@ -95355,7 +103789,7 @@ type RegionInstanceGroupManagersPatchCall struct { // process of being patched. You must separately verify the status of // the individual instances with the listmanagedinstances method. This // method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. (== suppress_warning http-rest-shadowed ==) +// and processing rules. func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95411,7 +103845,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95477,7 +103911,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ @@ -95553,7 +103987,7 @@ type RegionInstanceGroupManagersRecreateInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. (== suppress_warning http-rest-shadowed ==) +// request. 
func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95609,7 +104043,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95675,7 +104109,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ @@ -95749,7 +104183,6 @@ type RegionInstanceGroupManagersResizeCall struct { // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. 
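(Aside, not part of the patch: a sketch of the resize call described above. The returned Operation only confirms the resize was accepted; the underlying create or delete actions are verified separately with listmanagedinstances, as the docs note. Same assumptions as the earlier sketches.)

    // resizeMIG sketches the Resize call documented above.
    func resizeMIG(ctx context.Context, svc *compute.Service, size int64) error {
        op, err := svc.RegionInstanceGroupManagers.
            Resize("my-project", "us-central1", "my-igm", size).
            Context(ctx).
            Do()
        if err != nil {
            return err
        }
        log.Printf("resize operation %s: %s", op.Name, op.Status)
        return nil
    }
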
-// (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95805,7 +104238,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95866,7 +104299,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ @@ -95936,8 +104369,7 @@ type RegionInstanceGroupManagersSetInstanceTemplateCall struct { // SetInstanceTemplate: Sets the instance template to use when creating // new instances or recreating instances in this group. Existing -// instances are not affected. (== suppress_warning http-rest-shadowed -// ==) +// instances are not affected. 
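(Aside, not part of the patch: a sketch combining setInstanceTemplate and recreateInstances. Setting a new template does not touch existing instances, per the description above, so applying it to a running VM typically means recreating that VM afterwards. Template and instance paths are hypothetical placeholders; same assumptions as the earlier sketches.)

    // rollOutTemplate sketches setInstanceTemplate followed by recreateInstances.
    func rollOutTemplate(ctx context.Context, svc *compute.Service) error {
        // Point the group at a (hypothetical) new template; existing VMs are untouched.
        _, err := svc.RegionInstanceGroupManagers.SetInstanceTemplate(
            "my-project", "us-central1", "my-igm",
            &compute.RegionInstanceGroupManagersSetTemplateRequest{
                InstanceTemplate: "global/instanceTemplates/my-template-v2",
            }).Context(ctx).Do()
        if err != nil {
            return err
        }
        // Recreate one instance so it picks up the new template.
        op, err := svc.RegionInstanceGroupManagers.RecreateInstances(
            "my-project", "us-central1", "my-igm",
            &compute.RegionInstanceGroupManagersRecreateRequest{
                Instances: []string{"zones/us-central1-a/instances/my-instance-0"},
            }).Context(ctx).Do()
        if err != nil {
            return err
        }
        log.Printf("recreate operation %s: %s", op.Name, op.Status)
        return nil
    }
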
func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95993,7 +104425,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Heade func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96059,7 +104491,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ @@ -96123,7 +104555,7 @@ type RegionInstanceGroupManagersSetTargetPoolsCall struct { // SetTargetPools: Modifies the target pools to which all new instances // in this group are assigned. Existing instances in the group are not -// affected. (== suppress_warning http-rest-shadowed ==) +// affected. func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96179,7 +104611,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96245,7 +104677,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected. (== suppress_warning http-rest-shadowed ==)", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ @@ -96307,8 +104739,7 @@ type RegionInstanceGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified instance group resource. (== -// suppress_warning http-rest-shadowed ==) +// Get: Returns the specified instance group resource. 
func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96354,7 +104785,7 @@ func (c *RegionInstanceGroupsGetCall) Header() http.Header { func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96418,7 +104849,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified instance group resource.", // "httpMethod": "GET", // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ @@ -96473,7 +104904,7 @@ type RegionInstanceGroupsListCall struct { } // List: Retrieves the list of instance group resources contained within -// the specified region. (== suppress_warning http-rest-shadowed ==) +// the specified region. func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96485,24 +104916,25 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -96510,10 +104942,10 @@ func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGrou // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -96524,12 +104956,13 @@ func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInsta // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -96537,7 +104970,7 @@ func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGr } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -96581,7 +105014,7 @@ func (c *RegionInstanceGroupsListCall) Header() http.Header { func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96644,7 +105077,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of instance group resources contained within the specified region.", // "httpMethod": "GET", // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ @@ -96653,25 +105086,25 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -96739,8 +105172,7 @@ type RegionInstanceGroupsListInstancesCall struct { // ListInstances: Lists the instances in the specified instance group // and displays information about the named ports. Depending on the // specified options, this method can list all instances or only the -// instances that are running. (== suppress_warning http-rest-shadowed -// ==) +// instances that are running. func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96754,24 +105186,25 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. 
You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -96779,10 +105212,10 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -96793,12 +105226,13 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("orderBy", orderBy) @@ -96806,7 +105240,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) @@ -96840,7 +105274,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96907,7 +105341,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ @@ -96917,7 +105351,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, @@ -96929,19 +105363,19 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -97010,7 +105444,7 @@ type RegionInstanceGroupsSetNamedPortsCall struct { } // SetNamedPorts: Sets the named ports for the specified regional -// instance group. (== suppress_warning http-rest-shadowed ==) +// instance group. 
func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97066,7 +105500,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97132,7 +105566,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the named ports for the specified regional instance group.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ @@ -97194,7 +105628,6 @@ type RegionOperationsDeleteCall struct { } // Delete: Deletes the specified region-specific Operations resource. -// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -97231,7 +105664,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97267,7 +105700,7 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Deletes the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified region-specific Operations resource.", // "httpMethod": "DELETE", // "id": "compute.regionOperations.delete", // "parameterOrder": [ @@ -97320,8 +105753,7 @@ type RegionOperationsGetCall struct { header_ http.Header } -// Get: Retrieves the specified region-specific Operations resource. (== -// suppress_warning http-rest-shadowed ==) +// Get: Retrieves the specified region-specific Operations resource. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -97368,7 +105800,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97432,7 +105864,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the specified region-specific Operations resource.", // "httpMethod": "GET", // "id": "compute.regionOperations.get", // "parameterOrder": [ @@ -97463,7 +105895,440 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionOperations.list": + +type RegionOperationsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of Operation resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionOperations.wait": + +type RegionOperationsWaitCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. 
This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. +// +// This method is called on a best-effort basis. Specifically: +// - In uncommon cases, when the server is overloaded, the request might +// return before the default deadline is reached, or might return after +// zero seconds. +// - If the default deadline is reached, there is no guarantee that the +// operation is actually done when the method returns. Be prepared to +// retry if the operation is not `DONE`. +func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { + c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionOperationsWaitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}/wait") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionOperations.wait" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", + // "httpMethod": "POST", + // "id": "compute.regionOperations.wait", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations/{operation}/wait", // "response": { // "$ref": "Operation" // }, @@ -97476,271 +106341,6 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionOperations.list": - -type RegionOperationsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of Operation resources contained within the -// specified region. (== suppress_warning http-rest-shadowed ==) -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. 
The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionOperationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of Operation resources contained within the specified region. (== suppress_warning http-rest-shadowed ==)", - // "httpMethod": "GET", - // "id": "compute.regionOperations.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/operations", - // "response": { - // "$ref": "OperationList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "compute.regionSslCertificates.delete": type RegionSslCertificatesDeleteCall struct { @@ -97754,7 +106354,6 @@ type RegionSslCertificatesDeleteCall struct { } // Delete: Deletes the specified SslCertificate resource in the region. -// (== suppress_warning http-rest-shadowed ==) func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97809,7 +106408,7 @@ func (c *RegionSslCertificatesDeleteCall) Header() http.Header { func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97870,7 +106469,7 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource in the region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified SslCertificate resource in the region.", // "httpMethod": "DELETE", // "id": "compute.regionSslCertificates.delete", // "parameterOrder": [ @@ -97933,7 +106532,7 @@ type RegionSslCertificatesGetCall struct { // Get: Returns the specified SslCertificate resource in the specified // region. Get a list of available SSL certificates by making a list() -// request. (== suppress_warning http-rest-shadowed ==) +// request. func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97979,7 +106578,7 @@ func (c *RegionSslCertificatesGetCall) Header() http.Header { func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98043,7 +106642,7 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } return ret, nil // { - // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified SslCertificate resource in the specified region. 
Get a list of available SSL certificates by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionSslCertificates.get", // "parameterOrder": [ @@ -98100,8 +106699,7 @@ type RegionSslCertificatesInsertCall struct { } // Insert: Creates a SslCertificate resource in the specified project -// and region using the data included in the request (== -// suppress_warning http-rest-shadowed ==) +// and region using the data included in the request func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98156,7 +106754,7 @@ func (c *RegionSslCertificatesInsertCall) Header() http.Header { func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98221,7 +106819,7 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", // "httpMethod": "POST", // "id": "compute.regionSslCertificates.insert", // "parameterOrder": [ @@ -98277,8 +106875,7 @@ type RegionSslCertificatesListCall struct { } // List: Retrieves the list of SslCertificate resources available to the -// specified project in the specified region. (== suppress_warning -// http-rest-shadowed ==) +// specified project in the specified region. func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98290,24 +106887,25 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -98315,10 +106913,10 @@ func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertific // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -98329,12 +106927,13 @@ func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslC // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -98342,7 +106941,7 @@ func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertif } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -98386,7 +106985,7 @@ func (c *RegionSslCertificatesListCall) Header() http.Header { func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98449,7 +107048,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", // "httpMethod": "GET", // "id": "compute.regionSslCertificates.list", // "parameterOrder": [ @@ -98458,25 +107057,25 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -98541,8 +107140,7 @@ type RegionTargetHttpProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetHttpProxy resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetHttpProxy resource. 
func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98597,7 +107195,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98658,7 +107256,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetHttpProxy resource.", // "httpMethod": "DELETE", // "id": "compute.regionTargetHttpProxies.delete", // "parameterOrder": [ @@ -98721,7 +107319,7 @@ type RegionTargetHttpProxiesGetCall struct { // Get: Returns the specified TargetHttpProxy resource in the specified // region. Gets a list of available target HTTP proxies by making a -// list() request. (== suppress_warning http-rest-shadowed ==) +// list() request. func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98767,7 +107365,7 @@ func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98831,7 +107429,7 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionTargetHttpProxies.get", // "parameterOrder": [ @@ -98888,8 +107486,7 @@ type RegionTargetHttpProxiesInsertCall struct { } // Insert: Creates a TargetHttpProxy resource in the specified project -// and region using the data included in the request. (== -// suppress_warning http-rest-shadowed ==) +// and region using the data included in the request. 
func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98944,7 +107541,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99009,7 +107606,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionTargetHttpProxies.insert", // "parameterOrder": [ @@ -99065,8 +107662,7 @@ type RegionTargetHttpProxiesListCall struct { } // List: Retrieves the list of TargetHttpProxy resources available to -// the specified project in the specified region. (== suppress_warning -// http-rest-shadowed ==) +// the specified project in the specified region. func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99078,24 +107674,25 @@ func (r *RegionTargetHttpProxiesService) List(project string, region string) *Re // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. 
However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -99103,10 +107700,10 @@ func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHtt // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -99117,12 +107714,13 @@ func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTa // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -99130,7 +107728,7 @@ func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetH } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -99174,7 +107772,7 @@ func (c *RegionTargetHttpProxiesListCall) Header() http.Header { func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99237,7 +107835,7 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", // "httpMethod": "GET", // "id": "compute.regionTargetHttpProxies.list", // "parameterOrder": [ @@ -99246,25 +107844,25 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -99330,8 +107928,7 @@ type RegionTargetHttpProxiesSetUrlMapCall struct { header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetUrlMap: Changes the URL map for TargetHttpProxy. func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99387,7 +107984,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99453,7 +108050,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the URL map for TargetHttpProxy.", // "httpMethod": "POST", // "id": "compute.regionTargetHttpProxies.setUrlMap", // "parameterOrder": [ @@ -99516,8 +108113,7 @@ type RegionTargetHttpsProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetHttpsProxy resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetHttpsProxy resource. 
func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99572,7 +108168,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99633,7 +108229,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetHttpsProxy resource.", // "httpMethod": "DELETE", // "id": "compute.regionTargetHttpsProxies.delete", // "parameterOrder": [ @@ -99696,7 +108292,7 @@ type RegionTargetHttpsProxiesGetCall struct { // Get: Returns the specified TargetHttpsProxy resource in the specified // region. Gets a list of available target HTTP proxies by making a -// list() request. (== suppress_warning http-rest-shadowed ==) +// list() request. func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99742,7 +108338,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99806,7 +108402,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionTargetHttpsProxies.get", // "parameterOrder": [ @@ -99863,8 +108459,7 @@ type RegionTargetHttpsProxiesInsertCall struct { } // Insert: Creates a TargetHttpsProxy resource in the specified project -// and region using the data included in the request. (== -// suppress_warning http-rest-shadowed ==) +// and region using the data included in the request. 
func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99919,7 +108514,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99984,7 +108579,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionTargetHttpsProxies.insert", // "parameterOrder": [ @@ -100040,8 +108635,7 @@ type RegionTargetHttpsProxiesListCall struct { } // List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project in the specified region. (== suppress_warning -// http-rest-shadowed ==) +// the specified project in the specified region. func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100053,24 +108647,25 @@ func (r *RegionTargetHttpsProxiesService) List(project string, region string) *R // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. 
However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -100078,10 +108673,10 @@ func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHt // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -100092,12 +108687,13 @@ func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionT // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -100105,7 +108701,7 @@ func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTarget } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -100149,7 +108745,7 @@ func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100212,7 +108808,7 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta } return ret, nil // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", // "httpMethod": "GET", // "id": "compute.regionTargetHttpsProxies.list", // "parameterOrder": [ @@ -100221,25 +108817,25 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -100306,7 +108902,6 @@ type RegionTargetHttpsProxiesSetSslCertificatesCall struct { } // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. -// (== suppress_warning http-rest-shadowed ==) func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100362,7 +108957,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100428,7 +109023,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Replaces SslCertificates for TargetHttpsProxy.", // "httpMethod": "POST", // "id": "compute.regionTargetHttpsProxies.setSslCertificates", // "parameterOrder": [ @@ -100492,8 +109087,7 @@ type RegionTargetHttpsProxiesSetUrlMapCall struct { header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetUrlMap: Changes the URL map for TargetHttpsProxy. 
func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100549,7 +109143,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100615,7 +109209,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the URL map for TargetHttpsProxy.", // "httpMethod": "POST", // "id": "compute.regionTargetHttpsProxies.setUrlMap", // "parameterOrder": [ @@ -100678,8 +109272,7 @@ type RegionUrlMapsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified UrlMap resource. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified UrlMap resource. func (r *RegionUrlMapsService) Delete(project string, region string, urlMap string) *RegionUrlMapsDeleteCall { c := &RegionUrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100722,7 +109315,7 @@ func (c *RegionUrlMapsDeleteCall) Header() http.Header { func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100783,7 +109376,7 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified UrlMap resource.", // "httpMethod": "DELETE", // "id": "compute.regionUrlMaps.delete", // "parameterOrder": [ @@ -100845,8 +109438,7 @@ type RegionUrlMapsGetCall struct { } // Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// URL maps by making a list() request. func (r *RegionUrlMapsService) Get(project string, region string, urlMap string) *RegionUrlMapsGetCall { c := &RegionUrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100892,7 +109484,7 @@ func (c *RegionUrlMapsGetCall) Header() http.Header { func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100956,7 +109548,7 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", // "httpMethod": "GET", // "id": "compute.regionUrlMaps.get", // "parameterOrder": [ @@ -101013,8 +109605,7 @@ type RegionUrlMapsInsertCall struct { } // Insert: Creates a UrlMap resource in the specified project using the -// data included in the request. (== suppress_warning http-rest-shadowed -// ==) +// data included in the request. func (r *RegionUrlMapsService) Insert(project string, region string, urlmap *UrlMap) *RegionUrlMapsInsertCall { c := &RegionUrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101057,7 +109648,7 @@ func (c *RegionUrlMapsInsertCall) Header() http.Header { func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101122,7 +109713,7 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a UrlMap resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.regionUrlMaps.insert", // "parameterOrder": [ @@ -101178,8 +109769,7 @@ type RegionUrlMapsListCall struct { } // List: Retrieves the list of UrlMap resources available to the -// specified project in the specified region. (== suppress_warning -// http-rest-shadowed ==) +// specified project in the specified region. func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMapsListCall { c := &RegionUrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101191,24 +109781,25 @@ func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMap // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -101216,10 +109807,10 @@ func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -101230,12 +109821,13 @@ func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListC // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -101243,7 +109835,7 @@ func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -101287,7 +109879,7 @@ func (c *RegionUrlMapsListCall) Header() http.Header { func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101350,7 +109942,7 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e } return ret, nil // { - // "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", // "httpMethod": "GET", // "id": "compute.regionUrlMaps.list", // "parameterOrder": [ @@ -101359,25 +109951,25 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -101445,8 +110037,7 @@ type RegionUrlMapsPatchCall struct { // Patch: Patches the specified UrlMap resource with the data included // in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. (== suppress_warning -// http-rest-shadowed ==) +// merge patch format and processing rules. 
func (r *RegionUrlMapsService) Patch(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsPatchCall { c := &RegionUrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101490,7 +110081,7 @@ func (c *RegionUrlMapsPatchCall) Header() http.Header { func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101556,7 +110147,7 @@ func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.regionUrlMaps.patch", // "parameterOrder": [ @@ -101621,7 +110212,7 @@ type RegionUrlMapsUpdateCall struct { } // Update: Updates the specified UrlMap resource with the data included -// in the request. (== suppress_warning http-rest-shadowed ==) +// in the request. func (r *RegionUrlMapsService) Update(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsUpdateCall { c := &RegionUrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101665,7 +110256,7 @@ func (c *RegionUrlMapsUpdateCall) Header() http.Header { func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101731,7 +110322,7 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified UrlMap resource with the data included in the request.", // "httpMethod": "PUT", // "id": "compute.regionUrlMaps.update", // "parameterOrder": [ @@ -101797,7 +110388,7 @@ type RegionUrlMapsValidateCall struct { // Validate: Runs static validation for the UrlMap. In particular, the // tests of the provided UrlMap will be run. Calling this method does -// NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==) +// NOT create the UrlMap. 
func (r *RegionUrlMapsService) Validate(project string, region string, urlMap string, regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest) *RegionUrlMapsValidateCall { c := &RegionUrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101834,7 +110425,7 @@ func (c *RegionUrlMapsValidateCall) Header() http.Header { func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101900,7 +110491,7 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa } return ret, nil // { - // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==)", + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", // "httpMethod": "POST", // "id": "compute.regionUrlMaps.validate", // "parameterOrder": [ @@ -101959,8 +110550,7 @@ type RegionsGetCall struct { } // Get: Returns the specified Region resource. Gets a list of available -// regions by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// regions by making a list() request. // For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get func (r *RegionsService) Get(project string, region string) *RegionsGetCall { c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -102006,7 +110596,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102069,7 +110659,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", // "httpMethod": "GET", // "id": "compute.regions.get", // "parameterOrder": [ @@ -102117,7 +110707,7 @@ type RegionsListCall struct { } // List: Retrieves the list of region resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. // For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list func (r *RegionsService) List(project string) *RegionsListCall { c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -102129,24 +110719,25 @@ func (r *RegionsService) List(project string) *RegionsListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c @@ -102154,10 +110745,10 @@ func (c *RegionsListCall) Filter(filter string) *RegionsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -102168,12 +110759,13 @@ func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
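As with the filter text, the orderBy wording above is only reformatted; the accepted values remain the default name ordering and `creationTimestamp desc` (the OrderBy setter follows in the next hunk). A small sketch of requesting descending creation-timestamp order on Regions.List, under the same assumptions as the previous snippet (hypothetical project ID, standard RegionList fields):

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// newestRegionsFirst lists regions sorted by creationTimestamp, newest first.
// Per the documentation above, this ordering is mainly useful for resources
// such as operations; regions are used here only because the parameter is
// documented on this call.
func newestRegionsFirst(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.Regions.List("my-project").
		OrderBy("creationTimestamp desc").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, r := range resp.Items {
		fmt.Println(r.Name, r.Status)
	}
	return nil
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := newestRegionsFirst(ctx, svc); err != nil {
		log.Fatal(err)
	}
}
```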
func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -102181,7 +110773,7 @@ func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -102225,7 +110817,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102287,7 +110879,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } return ret, nil // { - // "description": "Retrieves the list of region resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of region resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.regions.list", // "parameterOrder": [ @@ -102295,25 +110887,25 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -102370,8 +110962,7 @@ type ReservationsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of reservations. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of reservations. func (r *ReservationsService) AggregatedList(project string) *ReservationsAggregatedListCall { c := &ReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102382,35 +110973,49 @@ func (r *ReservationsService) AggregatedList(project string) *ReservationsAggreg // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. 
The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ReservationsAggregatedListCall) Filter(filter string) *ReservationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *ReservationsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *ReservationsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ReservationsAggregatedListCall) MaxResults(maxResults int64) *ReservationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -102421,12 +111026,13 @@ func (c *ReservationsAggregatedListCall) MaxResults(maxResults int64) *Reservati // order based on the resource name. 
// // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *ReservationsAggregatedListCall) OrderBy(orderBy string) *ReservationsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -102434,7 +111040,7 @@ func (c *ReservationsAggregatedListCall) OrderBy(orderBy string) *ReservationsAg } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *ReservationsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -102478,7 +111084,7 @@ func (c *ReservationsAggregatedListCall) Header() http.Header { func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102540,7 +111146,7 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese } return ret, nil // { - // "description": "Retrieves an aggregated list of reservations. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of reservations.", // "httpMethod": "GET", // "id": "compute.reservations.aggregatedList", // "parameterOrder": [ @@ -102548,25 +111154,30 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -102624,8 +111235,7 @@ type ReservationsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified reservation. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified reservation. func (r *ReservationsService) Delete(project string, zone string, reservation string) *ReservationsDeleteCall { c := &ReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102680,7 +111290,7 @@ func (c *ReservationsDeleteCall) Header() http.Header { func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102741,7 +111351,7 @@ func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified reservation. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified reservation.", // "httpMethod": "DELETE", // "id": "compute.reservations.delete", // "parameterOrder": [ @@ -102802,8 +111412,7 @@ type ReservationsGetCall struct { header_ http.Header } -// Get: Retrieves information about the specified reservation. (== -// suppress_warning http-rest-shadowed ==) +// Get: Retrieves information about the specified reservation. func (r *ReservationsService) Get(project string, zone string, reservation string) *ReservationsGetCall { c := &ReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102849,7 +111458,7 @@ func (c *ReservationsGetCall) Header() http.Header { func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102913,7 +111522,7 @@ func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, er } return ret, nil // { - // "description": "Retrieves information about the specified reservation. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves information about the specified reservation.", // "httpMethod": "GET", // "id": "compute.reservations.get", // "parameterOrder": [ @@ -102971,8 +111580,7 @@ type ReservationsGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) +// empty if no such policy or resource exists. 
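The IAM-related descriptions in these hunks only drop the suppress_warning annotation. For context, reading a reservation's policy with the GetIamPolicy call documented just above (its signature follows in the next hunk) would look roughly like this; the project, zone, and reservation names are placeholders, and the Policy.Bindings/Binding.Role/Binding.Members field names are assumptions carried over from other revisions of this generated surface.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// printReservationPolicy fetches the IAM policy of a zonal reservation.
// As the description above notes, the policy may be empty if none exists.
func printReservationPolicy(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.Reservations.
		GetIamPolicy("my-project", "us-central1-a", "my-reservation").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	if len(policy.Bindings) == 0 {
		fmt.Println("no bindings set on this reservation")
		return nil
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}
	return nil
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := printReservationPolicy(ctx, svc); err != nil {
		log.Fatal(err)
	}
}
```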
func (r *ReservationsService) GetIamPolicy(project string, zone string, resource string) *ReservationsGetIamPolicyCall { c := &ReservationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103018,7 +111626,7 @@ func (c *ReservationsGetIamPolicyCall) Header() http.Header { func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103082,7 +111690,7 @@ func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", // "id": "compute.reservations.getIamPolicy", // "parameterOrder": [ @@ -103139,8 +111747,7 @@ type ReservationsInsertCall struct { } // Insert: Creates a new reservation. For more information, read -// Reserving zonal resources. (== suppress_warning http-rest-shadowed -// ==) +// Reserving zonal resources. func (r *ReservationsService) Insert(project string, zone string, reservation *Reservation) *ReservationsInsertCall { c := &ReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103195,7 +111802,7 @@ func (c *ReservationsInsertCall) Header() http.Header { func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103260,7 +111867,7 @@ func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a new reservation. For more information, read Reserving zonal resources. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a new reservation. For more information, read Reserving zonal resources.", // "httpMethod": "POST", // "id": "compute.reservations.insert", // "parameterOrder": [ @@ -103316,8 +111923,7 @@ type ReservationsListCall struct { } // List: A list of all the reservations that have been configured for -// the specified project in specified zone. (== suppress_warning -// http-rest-shadowed ==) +// the specified project in specified zone. func (r *ReservationsService) List(project string, zone string) *ReservationsListCall { c := &ReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103329,24 +111935,25 @@ func (r *ReservationsService) List(project string, zone string) *ReservationsLis // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { c.urlParams_.Set("filter", filter) return c @@ -103354,10 +111961,10 @@ func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ReservationsListCall) MaxResults(maxResults int64) *ReservationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -103368,12 +111975,13 @@ func (c *ReservationsListCall) MaxResults(maxResults int64) *ReservationsListCal // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
func (c *ReservationsListCall) OrderBy(orderBy string) *ReservationsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -103381,7 +111989,7 @@ func (c *ReservationsListCall) OrderBy(orderBy string) *ReservationsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -103425,7 +112033,7 @@ func (c *ReservationsListCall) Header() http.Header { func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103488,7 +112096,7 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis } return ret, nil // { - // "description": "A list of all the reservations that have been configured for the specified project in specified zone. (== suppress_warning http-rest-shadowed ==)", + // "description": "A list of all the reservations that have been configured for the specified project in specified zone.", // "httpMethod": "GET", // "id": "compute.reservations.list", // "parameterOrder": [ @@ -103497,25 +112105,25 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -103583,7 +112191,7 @@ type ReservationsResizeCall struct { // Resize: Resizes the reservation (applicable to standalone // reservations only). For more information, read Modifying -// reservations. (== suppress_warning http-rest-shadowed ==) +// reservations. 
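The Resize description above takes a ReservationsResizeRequest (its signature opens the next hunk). A minimal sketch of growing a standalone reservation follows; the SpecificSkuCount field name and the placeholder project/zone/reservation identifiers are assumptions, and the returned Operation is only printed rather than polled to completion.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// resizeReservation grows a standalone reservation to the given VM count.
// Resize returns an Operation that completes asynchronously.
func resizeReservation(ctx context.Context, svc *compute.Service, count int64) error {
	req := &compute.ReservationsResizeRequest{
		// Assumed field name for the target instance count.
		SpecificSkuCount: count,
	}
	op, err := svc.Reservations.
		Resize("my-project", "us-central1-a", "my-reservation", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("started operation:", op.Name, op.Status)
	return nil
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := resizeReservation(ctx, svc, 10); err != nil {
		log.Fatal(err)
	}
}
```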
func (r *ReservationsService) Resize(project string, zone string, reservation string, reservationsresizerequest *ReservationsResizeRequest) *ReservationsResizeCall { c := &ReservationsResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103639,7 +112247,7 @@ func (c *ReservationsResizeCall) Header() http.Header { func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103705,7 +112313,7 @@ func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations. (== suppress_warning http-rest-shadowed ==)", + // "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations.", // "httpMethod": "POST", // "id": "compute.reservations.resize", // "parameterOrder": [ @@ -103770,8 +112378,7 @@ type ReservationsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) +// resource. Replaces any existing policy. func (r *ReservationsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *ReservationsSetIamPolicyCall { c := &ReservationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103808,7 +112415,7 @@ func (c *ReservationsSetIamPolicyCall) Header() http.Header { func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103874,7 +112481,7 @@ func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", // "id": "compute.reservations.setIamPolicy", // "parameterOrder": [ @@ -103934,7 +112541,7 @@ type ReservationsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) +// specified resource. 
func (r *ReservationsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *ReservationsTestIamPermissionsCall { c := &ReservationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103971,7 +112578,7 @@ func (c *ReservationsTestIamPermissionsCall) Header() http.Header { func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104037,7 +112644,7 @@ func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", // "id": "compute.reservations.testIamPermissions", // "parameterOrder": [ @@ -104096,7 +112703,6 @@ type ResourcePoliciesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of resource policies. -// (== suppress_warning http-rest-shadowed ==) func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePoliciesAggregatedListCall { c := &ResourcePoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104107,35 +112713,49 @@ func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePolici // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *ResourcePoliciesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ResourcePoliciesAggregatedListCall) MaxResults(maxResults int64) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -104146,12 +112766,13 @@ func (c *ResourcePoliciesAggregatedListCall) MaxResults(maxResults int64) *Resou // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *ResourcePoliciesAggregatedListCall) OrderBy(orderBy string) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -104159,7 +112780,7 @@ func (c *ResourcePoliciesAggregatedListCall) OrderBy(orderBy string) *ResourcePo } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
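The one behavioral addition in these hunks is the new includeAllScopes query parameter on the aggregated-list calls. A sketch of opting into it on ResourcePolicies.AggregatedList, under the same assumptions as the earlier snippets (hypothetical project ID, and the usual map-keyed Items plus NextPageToken fields on the aggregated response); the PageToken setter used here is the one defined in the next hunk.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// IncludeAllScopes(true) asks the server to return an entry for every
	// visible scope (zone, region, global), not only the scopes where the
	// resource type is expected to be found; see the parameter text above.
	call := svc.ResourcePolicies.AggregatedList("my-project").IncludeAllScopes(true)

	pageToken := ""
	for {
		resp, err := call.PageToken(pageToken).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		// The aggregated response is keyed by scope, e.g. "regions/us-central1".
		for scope := range resp.Items {
			fmt.Println(scope)
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}
}
```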
func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -104203,7 +112824,7 @@ func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104265,7 +112886,7 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of resource policies. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of resource policies.", // "httpMethod": "GET", // "id": "compute.resourcePolicies.aggregatedList", // "parameterOrder": [ @@ -104273,25 +112894,30 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -104349,8 +112975,7 @@ type ResourcePoliciesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified resource policy. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified resource policy. 
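// Editor's note: a minimal, illustrative sketch (not part of the generated
// file) of the Delete call documented above. Project, region and policy
// names are placeholders; svc is assumed to be a *compute.Service built with
// compute.NewService, and "fmt" is assumed to be imported. Do only starts
// the deletion; poll the returned Operation if you need to wait for it.
func deleteResourcePolicySketch(svc *compute.Service) error {
	op, err := svc.ResourcePolicies.Delete("my-project", "us-central1", "my-policy").Do()
	if err != nil {
		return err
	}
	fmt.Printf("deletion started: operation=%s status=%s\n", op.Name, op.Status)
	return nil
}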
func (r *ResourcePoliciesService) Delete(project string, region string, resourcePolicy string) *ResourcePoliciesDeleteCall { c := &ResourcePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104405,7 +113030,7 @@ func (c *ResourcePoliciesDeleteCall) Header() http.Header { func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104466,7 +113091,7 @@ func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified resource policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified resource policy.", // "httpMethod": "DELETE", // "id": "compute.resourcePolicies.delete", // "parameterOrder": [ @@ -104527,8 +113152,7 @@ type ResourcePoliciesGetCall struct { header_ http.Header } -// Get: Retrieves all information of the specified resource policy. (== -// suppress_warning http-rest-shadowed ==) +// Get: Retrieves all information of the specified resource policy. func (r *ResourcePoliciesService) Get(project string, region string, resourcePolicy string) *ResourcePoliciesGetCall { c := &ResourcePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104574,7 +113198,7 @@ func (c *ResourcePoliciesGetCall) Header() http.Header { func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104638,7 +113262,7 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol } return ret, nil // { - // "description": "Retrieves all information of the specified resource policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves all information of the specified resource policy.", // "httpMethod": "GET", // "id": "compute.resourcePolicies.get", // "parameterOrder": [ @@ -104696,8 +113320,7 @@ type ResourcePoliciesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) +// empty if no such policy or resource exists. func (r *ResourcePoliciesService) GetIamPolicy(project string, region string, resource string) *ResourcePoliciesGetIamPolicyCall { c := &ResourcePoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104743,7 +113366,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104807,7 +113430,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", // "id": "compute.resourcePolicies.getIamPolicy", // "parameterOrder": [ @@ -104863,8 +113486,7 @@ type ResourcePoliciesInsertCall struct { header_ http.Header } -// Insert: Creates a new resource policy. (== suppress_warning -// http-rest-shadowed ==) +// Insert: Creates a new resource policy. func (r *ResourcePoliciesService) Insert(project string, region string, resourcepolicy *ResourcePolicy) *ResourcePoliciesInsertCall { c := &ResourcePoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104919,7 +113541,7 @@ func (c *ResourcePoliciesInsertCall) Header() http.Header { func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104984,7 +113606,7 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new resource policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a new resource policy.", // "httpMethod": "POST", // "id": "compute.resourcePolicies.insert", // "parameterOrder": [ @@ -105040,8 +113662,7 @@ type ResourcePoliciesListCall struct { } // List: A list all the resource policies that have been configured for -// the specified project in specified region. (== suppress_warning -// http-rest-shadowed ==) +// the specified project in specified region. func (r *ResourcePoliciesService) List(project string, region string) *ResourcePoliciesListCall { c := &ResourcePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105053,24 +113674,25 @@ func (r *ResourcePoliciesService) List(project string, region string) *ResourceP // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -105078,10 +113700,10 @@ func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCa // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ResourcePoliciesListCall) MaxResults(maxResults int64) *ResourcePoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -105092,12 +113714,13 @@ func (c *ResourcePoliciesListCall) MaxResults(maxResults int64) *ResourcePolicie // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *ResourcePoliciesListCall) OrderBy(orderBy string) *ResourcePoliciesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -105105,7 +113728,7 @@ func (c *ResourcePoliciesListCall) OrderBy(orderBy string) *ResourcePoliciesList } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePoliciesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -105149,7 +113772,7 @@ func (c *ResourcePoliciesListCall) Header() http.Header { func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105212,7 +113835,7 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo } return ret, nil // { - // "description": "A list all the resource policies that have been configured for the specified project in specified region. (== suppress_warning http-rest-shadowed ==)", + // "description": "A list all the resource policies that have been configured for the specified project in specified region.", // "httpMethod": "GET", // "id": "compute.resourcePolicies.list", // "parameterOrder": [ @@ -105221,25 +113844,25 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -105306,8 +113929,7 @@ type ResourcePoliciesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) +// resource. Replaces any existing policy. 
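// Editor's note: an illustrative sketch (not part of the generated file) of
// the SetIamPolicy call documented above. All names are placeholders; svc is
// an already-constructed *compute.Service. A production caller would
// normally read the current policy with GetIamPolicy, modify it, and send it
// back with its etag rather than overwrite the bindings as done here.
func setResourcePolicyIamSketch(svc *compute.Service) error {
	req := &compute.RegionSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.viewer",
				Members: []string{"user:alice@example.com"},
			}},
		},
	}
	updated, err := svc.ResourcePolicies.SetIamPolicy("my-project", "us-central1", "my-policy", req).Do()
	if err != nil {
		return err
	}
	_ = updated // the *compute.Policy now in effect
	return nil
}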
func (r *ResourcePoliciesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *ResourcePoliciesSetIamPolicyCall { c := &ResourcePoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105344,7 +113966,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105410,7 +114032,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", // "id": "compute.resourcePolicies.setIamPolicy", // "parameterOrder": [ @@ -105470,7 +114092,7 @@ type ResourcePoliciesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) +// specified resource. func (r *ResourcePoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ResourcePoliciesTestIamPermissionsCall { c := &ResourcePoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105507,7 +114129,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105573,7 +114195,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", // "id": "compute.resourcePolicies.testIamPermissions", // "parameterOrder": [ @@ -105631,8 +114253,7 @@ type RoutersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of routers. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of routers. func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105643,35 +114264,49 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *RoutersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *RoutersAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -105682,12 +114317,13 @@ func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggrega // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -105695,7 +114331,7 @@ func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedLi } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -105739,7 +114375,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105801,7 +114437,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } return ret, nil // { - // "description": "Retrieves an aggregated list of routers. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of routers.", // "httpMethod": "GET", // "id": "compute.routers.aggregatedList", // "parameterOrder": [ @@ -105809,25 +114445,30 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -105885,8 +114526,7 @@ type RoutersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Router resource. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified Router resource. func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105941,7 +114581,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106002,7 +114642,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Router resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified Router resource.", // "httpMethod": "DELETE", // "id": "compute.routers.delete", // "parameterOrder": [ @@ -106064,8 +114704,7 @@ type RoutersGetCall struct { } // Get: Returns the specified Router resource. Gets a list of available -// routers by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// routers by making a list() request. func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106111,7 +114750,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106175,7 +114814,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", // "httpMethod": "GET", // "id": "compute.routers.get", // "parameterOrder": [ @@ -106233,7 +114872,7 @@ type RoutersGetNatMappingInfoCall struct { } // GetNatMappingInfo: Retrieves runtime Nat mapping information of VM -// endpoints. (== suppress_warning http-rest-shadowed ==) +// endpoints. 
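// Editor's note: an illustrative sketch (not part of the generated file)
// showing how the paged GetNatMappingInfo call documented above can be
// drained with the maxResults and pageToken parameters. Project, region and
// router names are placeholders; svc is an existing *compute.Service.
func drainNatMappingsSketch(svc *compute.Service) error {
	call := svc.Routers.GetNatMappingInfo("my-project", "us-central1", "my-router").MaxResults(50)
	for {
		page, err := call.Do()
		if err != nil {
			return err
		}
		// page holds one batch of per-VM NAT mapping entries; process it here.
		if page.NextPageToken == "" {
			return nil
		}
		call.PageToken(page.NextPageToken)
	}
}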
func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106246,24 +114885,25 @@ func (r *RoutersService) GetNatMappingInfo(project string, region string, router // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RoutersGetNatMappingInfoCall) Filter(filter string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("filter", filter) return c @@ -106271,10 +114911,10 @@ func (c *RoutersGetNatMappingInfoCall) Filter(filter string) *RoutersGetNatMappi // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -106285,12 +114925,13 @@ func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetN // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RoutersGetNatMappingInfoCall) OrderBy(orderBy string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("orderBy", orderBy) @@ -106298,7 +114939,7 @@ func (c *RoutersGetNatMappingInfoCall) OrderBy(orderBy string) *RoutersGetNatMap } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("pageToken", pageToken) @@ -106342,7 +114983,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106406,7 +115047,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp } return ret, nil // { - // "description": "Retrieves runtime Nat mapping information of VM endpoints. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves runtime Nat mapping information of VM endpoints.", // "httpMethod": "GET", // "id": "compute.routers.getNatMappingInfo", // "parameterOrder": [ @@ -106416,25 +115057,25 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -106508,7 +115149,7 @@ type RoutersGetRouterStatusCall struct { } // GetRouterStatus: Retrieves runtime information of the specified -// router. (== suppress_warning http-rest-shadowed ==) +// router. func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106554,7 +115195,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106618,7 +115259,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } return ret, nil // { - // "description": "Retrieves runtime information of the specified router. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves runtime information of the specified router.", // "httpMethod": "GET", // "id": "compute.routers.getRouterStatus", // "parameterOrder": [ @@ -106675,8 +115316,7 @@ type RoutersInsertCall struct { } // Insert: Creates a Router resource in the specified project and region -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106731,7 +115371,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106796,7 +115436,7 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a Router resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.routers.insert", // "parameterOrder": [ @@ -106852,7 +115492,7 @@ type RoutersListCall struct { } // List: Retrieves a list of Router resources available to the specified -// project. (== suppress_warning http-rest-shadowed ==) +// project. func (r *RoutersService) List(project string, region string) *RoutersListCall { c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106864,24 +115504,25 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. 
The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c @@ -106889,10 +115530,10 @@ func (c *RoutersListCall) Filter(filter string) *RoutersListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -106903,12 +115544,13 @@ func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
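// Editor's note: an illustrative sketch (not part of the generated file) of
// the orderBy value described above, using the call's Pages helper so the
// pageToken handling is done for you. Project and region are placeholders;
// svc is an existing *compute.Service, ctx a context.Context, and the "fmt"
// and "context" imports are assumed.
func listRoutersNewestFirstSketch(ctx context.Context, svc *compute.Service) error {
	return svc.Routers.List("my-project", "us-central1").
		OrderBy(`creationTimestamp desc`).
		Pages(ctx, func(page *compute.RouterList) error {
			for _, r := range page.Items {
				fmt.Println(r.CreationTimestamp, r.Name)
			}
			return nil
		})
}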
func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { c.urlParams_.Set("orderBy", orderBy) @@ -106916,7 +115558,7 @@ func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { c.urlParams_.Set("pageToken", pageToken) @@ -106960,7 +115602,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107023,7 +115665,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } return ret, nil // { - // "description": "Retrieves a list of Router resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of Router resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.routers.list", // "parameterOrder": [ @@ -107032,25 +115674,25 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -107118,8 +115760,7 @@ type RoutersPatchCall struct { // Patch: Patches the specified Router resource with the data included // in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. (== suppress_warning -// http-rest-shadowed ==) +// merge patch format and processing rules. 
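The `doRequest` hunks repeated throughout this file swap the hard-coded `gl-go/1.11.0` token for a value computed via `gensupport.GoVersion()`. That helper lives in an internal package, so user code cannot call it; the sketch below only approximates the resulting header with `runtime.Version()`, and the exact normalization the generated client applies may differ.

```
package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	// Rough approximation of the x-goog-api-client value the regenerated
	// code now sends: the Go toolchain version is resolved at runtime
	// instead of being frozen at generation time.
	goVersion := strings.TrimPrefix(runtime.Version(), "go")
	fmt.Println("x-goog-api-client: gl-go/" + goVersion + " gdcl/20200410")
}
```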
func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107175,7 +115816,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107241,7 +115882,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.routers.patch", // "parameterOrder": [ @@ -107307,7 +115948,7 @@ type RoutersPreviewCall struct { // Preview: Preview fields auto-generated during router create and // update operations. Calling this method does NOT create or update the -// router. (== suppress_warning http-rest-shadowed ==) +// router. func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107344,7 +115985,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107410,7 +116051,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } return ret, nil // { - // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router. (== suppress_warning http-rest-shadowed ==)", + // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", // "httpMethod": "POST", // "id": "compute.routers.preview", // "parameterOrder": [ @@ -107474,7 +116115,7 @@ type RoutersUpdateCall struct { // in the request. This method conforms to PUT semantics, which requests // that the state of the target resource be created or replaced with the // state defined by the representation enclosed in the request message -// payload. (== suppress_warning http-rest-shadowed ==) +// payload. 
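`Patch` (above) applies a JSON merge patch, so only fields set on the request body change, while `Update` (whose implementation follows) performs a full PUT replacement. A hedged sketch of the merge-patch variant; the project, region, router name, and description are made up, and the `Operation.Name`/`Operation.Status` fields are assumed from the broader compute v1 surface.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Only Description is set, so the PATCH merge semantics leave the
	// router's other fields (BGP config, interfaces, ...) untouched.
	patch := &compute.Router{Description: "edge router for us-central1 (example)"}

	op, err := svc.Routers.Patch("my-project", "us-central1", "example-router", patch).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
}
```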
func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107530,7 +116171,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107596,7 +116237,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload.", // "httpMethod": "PUT", // "id": "compute.routers.update", // "parameterOrder": [ @@ -107658,8 +116299,7 @@ type RoutesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Route resource. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified Route resource. // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -107714,7 +116354,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107774,7 +116414,7 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Route resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified Route resource.", // "httpMethod": "DELETE", // "id": "compute.routes.delete", // "parameterOrder": [ @@ -107827,8 +116467,7 @@ type RoutesGetCall struct { } // Get: Returns the specified Route resource. Gets a list of available -// routes by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// routes by making a list() request. 
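For the Routes getters just introduced, a sketch of fetching a single route and distinguishing a 404 from other failures via `*googleapi.Error`; the route name is a placeholder and `Route.DestRange`/`Route.Network` are field names assumed from the compute v1 surface rather than shown in this hunk.

```
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	route, err := svc.Routes.Get("my-project", "example-route").Context(ctx).Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		fmt.Println("route does not exist")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s -> %s via %s\n", route.Name, route.DestRange, route.Network)
}
```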
// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get func (r *RoutesService) Get(project string, route string) *RoutesGetCall { c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -107874,7 +116513,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107937,7 +116576,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", // "httpMethod": "GET", // "id": "compute.routes.get", // "parameterOrder": [ @@ -107985,8 +116624,7 @@ type RoutesInsertCall struct { } // Insert: Creates a Route resource in the specified project using the -// data included in the request. (== suppress_warning http-rest-shadowed -// ==) +// data included in the request. // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -108041,7 +116679,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108105,7 +116743,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Route resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a Route resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.routes.insert", // "parameterOrder": [ @@ -108152,7 +116790,7 @@ type RoutesListCall struct { } // List: Retrieves the list of Route resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list func (r *RoutesService) List(project string) *RoutesListCall { c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -108164,24 +116802,25 @@ func (r *RoutesService) List(project string) *RoutesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. 
+// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c @@ -108189,10 +116828,10 @@ func (c *RoutesListCall) Filter(filter string) *RoutesListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -108203,12 +116842,13 @@ func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -108216,7 +116856,7 @@ func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -108260,7 +116900,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108322,7 +116962,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { } return ret, nil // { - // "description": "Retrieves the list of Route resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of Route resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.routes.list", // "parameterOrder": [ @@ -108330,25 +116970,25 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -108406,8 +117046,7 @@ type SecurityPoliciesAddRuleCall struct { header_ http.Header } -// AddRule: Inserts a rule into a security policy. (== suppress_warning -// http-rest-shadowed ==) +// AddRule: Inserts a rule into a security policy. 
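`AddRule`, defined next, takes a `SecurityPolicyRule` body. The sketch below denies a single CIDR range at a given priority; the policy name, priority, and range are illustrative, and the matcher field names (`VersionedExpr`, `Config.SrcIpRanges`) reflect the compute v1 surface as I understand it rather than anything shown in this hunk.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	rule := &compute.SecurityPolicyRule{
		Priority:    1000, // lower number = evaluated earlier
		Action:      "deny(403)",
		Description: "block an example scanner range",
		Match: &compute.SecurityPolicyRuleMatcher{
			VersionedExpr: "SRC_IPS_V1",
			Config: &compute.SecurityPolicyRuleMatcherConfig{
				SrcIpRanges: []string{"203.0.113.0/24"},
			},
		},
	}

	op, err := svc.SecurityPolicies.AddRule("my-project", "example-policy", rule).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name)
}
```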
func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108443,7 +117082,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108508,7 +117147,7 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Inserts a rule into a security policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Inserts a rule into a security policy.", // "httpMethod": "POST", // "id": "compute.securityPolicies.addRule", // "parameterOrder": [ @@ -108557,8 +117196,7 @@ type SecurityPoliciesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified policy. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified policy. func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108612,7 +117250,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108672,7 +117310,7 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified policy.", // "httpMethod": "DELETE", // "id": "compute.securityPolicies.delete", // "parameterOrder": [ @@ -108725,7 +117363,7 @@ type SecurityPoliciesGetCall struct { } // Get: List all of the ordered rules present in a single specified -// policy. (== suppress_warning http-rest-shadowed ==) +// policy. func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108770,7 +117408,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108833,7 +117471,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "List all of the ordered rules present in a single specified policy.", // "httpMethod": "GET", // "id": "compute.securityPolicies.get", // "parameterOrder": [ @@ -108881,8 +117519,7 @@ type SecurityPoliciesGetRuleCall struct { header_ http.Header } -// GetRule: Gets a rule at the specified priority. (== suppress_warning -// http-rest-shadowed ==) +// GetRule: Gets a rule at the specified priority. func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108934,7 +117571,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108997,7 +117634,7 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit } return ret, nil // { - // "description": "Gets a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets a rule at the specified priority.", // "httpMethod": "GET", // "id": "compute.securityPolicies.getRule", // "parameterOrder": [ @@ -109051,7 +117688,7 @@ type SecurityPoliciesInsertCall struct { } // Insert: Creates a new policy in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) +// included in the request. func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109105,7 +117742,7 @@ func (c *SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109169,7 +117806,7 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a new policy in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.securityPolicies.insert", // "parameterOrder": [ @@ -109216,7 +117853,7 @@ type SecurityPoliciesListCall struct { } // List: List all the policies that have been configured for the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109227,24 +117864,25 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. 
The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -109252,10 +117890,10 @@ func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCa // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -109266,12 +117904,13 @@ func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPolicie // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. 
// -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -109279,7 +117918,7 @@ func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesList } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -109323,7 +117962,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109385,7 +118024,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "List all the policies that have been configured for the specified project.", // "httpMethod": "GET", // "id": "compute.securityPolicies.list", // "parameterOrder": [ @@ -109393,25 +118032,25 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -109457,6 +118096,242 @@ func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPo } } +// method id "compute.securityPolicies.listPreconfiguredExpressionSets": + +type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListPreconfiguredExpressionSets: Gets the current list of +// preconfigured Web Application Firewall (WAF) expressions. +func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c := &SecurityPoliciesListPreconfiguredExpressionSetsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) MaxResults(maxResults int64) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) OrderBy(orderBy string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToken string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Fields(s ...googleapi.Field) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) IfNoneMatch(entityTag string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Context(ctx context.Context) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.listPreconfiguredExpressionSets" call. +// Exactly one of +// *SecurityPoliciesListPreconfiguredExpressionSetsResponse or error +// will be non-nil. Any non-2xx status code is an error. 
Response +// headers are in either +// *SecurityPoliciesListPreconfiguredExpressionSetsResponse.ServerRespons +// e.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesListPreconfiguredExpressionSetsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.listPreconfiguredExpressionSets", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", + // "response": { + // "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.securityPolicies.patch": type SecurityPoliciesPatchCall struct { @@ -109470,7 +118345,7 @@ type SecurityPoliciesPatchCall struct { } // Patch: Patches the specified policy with the data included in the -// request. (== suppress_warning http-rest-shadowed ==) +// request. func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109525,7 +118400,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109590,7 +118465,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches the specified policy with the data included in the request.", // "httpMethod": "PATCH", // "id": "compute.securityPolicies.patch", // "parameterOrder": [ @@ -109645,8 +118520,7 @@ type SecurityPoliciesPatchRuleCall struct { header_ http.Header } -// PatchRule: Patches a rule at the specified priority. (== -// suppress_warning http-rest-shadowed ==) +// PatchRule: Patches a rule at the specified priority. 
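The newly generated `ListPreconfiguredExpressionSets` call above follows the same builder pattern as the other list methods. Since this diff does not show the nested shape of its response type, the sketch below simply re-marshals the response to JSON for inspection instead of assuming field names; the project ID is a placeholder.

```
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Newly exposed in this regeneration: the preconfigured WAF expression
	// sets usable in Cloud Armor security policy rules.
	resp, err := svc.SecurityPolicies.ListPreconfiguredExpressionSets("my-project").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```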
func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109689,7 +118563,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109754,7 +118628,7 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Patches a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches a rule at the specified priority.", // "httpMethod": "POST", // "id": "compute.securityPolicies.patchRule", // "parameterOrder": [ @@ -109809,8 +118683,7 @@ type SecurityPoliciesRemoveRuleCall struct { header_ http.Header } -// RemoveRule: Deletes a rule at the specified priority. (== -// suppress_warning http-rest-shadowed ==) +// RemoveRule: Deletes a rule at the specified priority. func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109852,7 +118725,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109912,7 +118785,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Deletes a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes a rule at the specified priority.", // "httpMethod": "POST", // "id": "compute.securityPolicies.removeRule", // "parameterOrder": [ @@ -109970,8 +118843,7 @@ type SnapshotsDeleteCall struct { // deletion is needed for subsequent snapshots, the data will be moved // to the next corresponding snapshot. // -// For more information, see Deleting snapshots. (== suppress_warning -// http-rest-shadowed ==) +// For more information, see Deleting snapshots. // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -110026,7 +118898,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110086,7 +118958,7 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Snapshot resource. 
Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", // "httpMethod": "DELETE", // "id": "compute.snapshots.delete", // "parameterOrder": [ @@ -110139,8 +119011,7 @@ type SnapshotsGetCall struct { } // Get: Returns the specified Snapshot resource. Gets a list of -// available snapshots by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// available snapshots by making a list() request. // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -110186,7 +119057,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110249,7 +119120,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", // "httpMethod": "GET", // "id": "compute.snapshots.get", // "parameterOrder": [ @@ -110298,8 +119169,7 @@ type SnapshotsGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) +// empty if no such policy or resource exists. func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110344,7 +119214,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110407,7 +119277,7 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", // "id": "compute.snapshots.getIamPolicy", // "parameterOrder": [ @@ -110455,7 +119325,7 @@ type SnapshotsListCall struct { } // List: Retrieves the list of Snapshot resources contained within the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list func (r *SnapshotsService) List(project string) *SnapshotsListCall { c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -110467,24 +119337,25 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c @@ -110492,10 +119363,10 @@ func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -110506,12 +119377,13 @@ func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -110519,7 +119391,7 @@ func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -110563,7 +119435,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110625,7 +119497,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err } return ret, nil // { - // "description": "Retrieves the list of Snapshot resources contained within the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of Snapshot resources contained within the specified project.", // "httpMethod": "GET", // "id": "compute.snapshots.list", // "parameterOrder": [ @@ -110633,25 +119505,25 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -110710,8 +119582,7 @@ type SnapshotsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) +// resource. Replaces any existing policy. func (r *SnapshotsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *SnapshotsSetIamPolicyCall { c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110747,7 +119618,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110812,7 +119683,7 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", // "id": "compute.snapshots.setIamPolicy", // "parameterOrder": [ @@ -110863,8 +119734,7 @@ type SnapshotsSetLabelsCall struct { } // SetLabels: Sets the labels on a snapshot. To learn more about labels, -// read the Labeling Resources documentation. (== suppress_warning -// http-rest-shadowed ==) +// read the Labeling Resources documentation. func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110900,7 +119770,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110965,7 +119835,7 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the labels on a snapshot. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", // "id": "compute.snapshots.setLabels", // "parameterOrder": [ @@ -111016,7 +119886,7 @@ type SnapshotsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) +// specified resource. func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111052,7 +119922,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111117,7 +119987,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", // "id": "compute.snapshots.testIamPermissions", // "parameterOrder": [ @@ -111168,8 +120038,7 @@ type SslCertificatesAggregatedListCall struct { } // AggregatedList: Retrieves the list of all SslCertificate resources, -// regional and global, available to the specified project. (== -// suppress_warning http-rest-shadowed ==) +// regional and global, available to the specified project. func (r *SslCertificatesService) AggregatedList(project string) *SslCertificatesAggregatedListCall { c := &SslCertificatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111180,35 +120049,49 @@ func (r *SslCertificatesService) AggregatedList(project string) *SslCertificates // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SslCertificatesAggregatedListCall) Filter(filter string) *SslCertificatesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *SslCertificatesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SslCertificatesAggregatedListCall) MaxResults(maxResults int64) *SslCertificatesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -111219,12 +120102,13 @@ func (c *SslCertificatesAggregatedListCall) MaxResults(maxResults int64) *SslCer // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SslCertificatesAggregatedListCall) OrderBy(orderBy string) *SslCertificatesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -111232,7 +120116,7 @@ func (c *SslCertificatesAggregatedListCall) OrderBy(orderBy string) *SslCertific } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCertificatesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -111276,7 +120160,7 @@ func (c *SslCertificatesAggregatedListCall) Header() http.Header { func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111338,7 +120222,7 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project.", // "httpMethod": "GET", // "id": "compute.sslCertificates.aggregatedList", // "parameterOrder": [ @@ -111346,25 +120230,30 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -111421,8 +120310,7 @@ type SslCertificatesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified SslCertificate resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified SslCertificate resource. 
func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111476,7 +120364,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111536,7 +120424,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified SslCertificate resource.", // "httpMethod": "DELETE", // "id": "compute.sslCertificates.delete", // "parameterOrder": [ @@ -111589,8 +120477,7 @@ type SslCertificatesGetCall struct { } // Get: Returns the specified SslCertificate resource. Gets a list of -// available SSL certificates by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available SSL certificates by making a list() request. func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111635,7 +120522,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111698,7 +120585,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", // "httpMethod": "GET", // "id": "compute.sslCertificates.get", // "parameterOrder": [ @@ -111746,8 +120633,7 @@ type SslCertificatesInsertCall struct { } // Insert: Creates a SslCertificate resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. 
func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111801,7 +120687,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111865,7 +120751,7 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.sslCertificates.insert", // "parameterOrder": [ @@ -111912,7 +120798,7 @@ type SslCertificatesListCall struct { } // List: Retrieves the list of SslCertificate resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111923,24 +120809,25 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -111948,10 +120835,10 @@ func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -111962,12 +120849,13 @@ func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -111975,7 +120863,7 @@ func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCa } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -112019,7 +120907,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112081,7 +120969,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of SslCertificate resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.sslCertificates.list", // "parameterOrder": [ @@ -112089,25 +120977,25 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -112166,7 +121054,7 @@ type SslPoliciesDeleteCall struct { // Delete: Deletes the specified SSL policy. The SSL policy resource can // be deleted only if it is not in use by any TargetHttpsProxy or -// TargetSslProxy resources. (== suppress_warning http-rest-shadowed ==) +// TargetSslProxy resources. func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112220,7 +121108,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112280,7 +121168,7 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", // "httpMethod": "DELETE", // "id": "compute.sslPolicies.delete", // "parameterOrder": [ @@ -112332,7 +121220,7 @@ type SslPoliciesGetCall struct { } // Get: Lists all of the ordered rules present in a single specified -// policy. (== suppress_warning http-rest-shadowed ==) +// policy. 
func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112377,7 +121265,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112440,7 +121328,7 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error } return ret, nil // { - // "description": "Lists all of the ordered rules present in a single specified policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists all of the ordered rules present in a single specified policy.", // "httpMethod": "GET", // "id": "compute.sslPolicies.get", // "parameterOrder": [ @@ -112487,8 +121375,7 @@ type SslPoliciesInsertCall struct { } // Insert: Returns the specified SSL policy resource. Gets a list of -// available SSL policies by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available SSL policies by making a list() request. func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112542,7 +121429,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112606,7 +121493,7 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", // "httpMethod": "POST", // "id": "compute.sslPolicies.insert", // "parameterOrder": [ @@ -112653,7 +121540,7 @@ type SslPoliciesListCall struct { } // List: Lists all the SSL policies that have been configured for the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112664,24 +121551,25 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. 
// // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -112689,10 +121577,10 @@ func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SslPoliciesListCall) MaxResults(maxResults int64) *SslPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -112703,12 +121591,13 @@ func (c *SslPoliciesListCall) MaxResults(maxResults int64) *SslPoliciesListCall // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SslPoliciesListCall) OrderBy(orderBy string) *SslPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -112716,7 +121605,7 @@ func (c *SslPoliciesListCall) OrderBy(orderBy string) *SslPoliciesListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -112760,7 +121649,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112822,7 +121711,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList } return ret, nil // { - // "description": "Lists all the SSL policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists all the SSL policies that have been configured for the specified project.", // "httpMethod": "GET", // "id": "compute.sslPolicies.list", // "parameterOrder": [ @@ -112830,25 +121719,25 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -112906,8 +121795,7 @@ type SslPoliciesListAvailableFeaturesCall struct { } // ListAvailableFeatures: Lists all features that can be specified in -// the SSL policy when using custom profile. (== suppress_warning -// http-rest-shadowed ==) +// the SSL policy when using custom profile. func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112918,24 +121806,25 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c @@ -112943,10 +121832,10 @@ func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPolicie // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -112957,12 +121846,13 @@ func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *Ssl // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
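For reference, a minimal sketch (not part of this vendored file or patch) of how the filter, orderBy, maxResults and paging parameters documented above might be driven from the generated client; the project ID "my-project" is a placeholder and Application Default Credentials are assumed.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials (an assumption here).
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder project ID, not taken from the patch.
	// Newest custom-profile SSL policies first, two results per page.
	call := svc.SslPolicies.List("my-project").
		Filter(`profile = "CUSTOM"`).
		OrderBy("creationTimestamp desc").
		MaxResults(2)

	// Pages follows nextPageToken for us until every page has been visited.
	if err := call.Pages(ctx, func(page *compute.SslPoliciesList) error {
		for _, p := range page.Items {
			fmt.Println(p.Name, p.MinTlsVersion)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Features that a custom-profile policy may reference.
	feats, err := svc.SslPolicies.ListAvailableFeatures("my-project").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("available features:", feats.Features)
}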
func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("orderBy", orderBy) @@ -112970,7 +121860,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPolic } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("pageToken", pageToken) @@ -113014,7 +121904,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113078,7 +121968,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists all features that can be specified in the SSL policy when using custom profile. (== suppress_warning http-rest-shadowed ==)", + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", // "httpMethod": "GET", // "id": "compute.sslPolicies.listAvailableFeatures", // "parameterOrder": [ @@ -113086,25 +121976,25 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -113142,7 +122032,7 @@ type SslPoliciesPatchCall struct { } // Patch: Patches the specified SSL policy with the data included in the -// request. (== suppress_warning http-rest-shadowed ==) +// request. 
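A rough sketch (again, not part of this patch) of calling the Patch method generated below; "my-project" and "my-ssl-policy" are placeholders, and the returned global Operation is only printed rather than waited on.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and "my-ssl-policy" are placeholder names.
	// Read the current policy first so the patch carries its fingerprint.
	policy, err := svc.SslPolicies.Get("my-project", "my-ssl-policy").Do()
	if err != nil {
		log.Fatal(err)
	}

	patch := &compute.SslPolicy{
		MinTlsVersion: "TLS_1_2",
		Fingerprint:   policy.Fingerprint,
	}
	op, err := svc.SslPolicies.Patch("my-project", "my-ssl-policy", patch).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("patch operation:", op.Name, op.Status)
}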
func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113197,7 +122087,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113262,7 +122152,7 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Patches the specified SSL policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches the specified SSL policy with the data included in the request.", // "httpMethod": "PATCH", // "id": "compute.sslPolicies.patch", // "parameterOrder": [ @@ -113315,8 +122205,7 @@ type SubnetworksAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of subnetworks. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of subnetworks. func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113327,35 +122216,49 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *SubnetworksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *SubnetworksAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -113366,12 +122269,13 @@ func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *Subnetwork // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -113379,7 +122283,7 @@ func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggr } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
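A minimal sketch (not part of this patch) exercising the IncludeAllScopes parameter added above together with Pages-based pagination on the subnetworks aggregated list; "my-project" is a placeholder project ID.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder project ID.
	call := svc.Subnetworks.AggregatedList("my-project").
		IncludeAllScopes(true). // also return scopes that currently hold no subnetworks
		MaxResults(100)

	// Items is keyed by scope (e.g. "regions/us-central1").
	if err := call.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, s := range scoped.Subnetworks {
				fmt.Println(scope, s.Name, s.IpCidrRange)
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}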
func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -113423,7 +122327,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113485,7 +122389,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne } return ret, nil // { - // "description": "Retrieves an aggregated list of subnetworks. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of subnetworks.", // "httpMethod": "GET", // "id": "compute.subnetworks.aggregatedList", // "parameterOrder": [ @@ -113493,25 +122397,30 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -113569,8 +122478,7 @@ type SubnetworksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified subnetwork. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified subnetwork. 
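A hedged sketch (not part of this patch) of the Delete method generated below: it deletes a subnetwork and then polls the returned region-scoped Operation with RegionOperations.Get. The project, region and subnetwork names are placeholders, and a real caller would bound the polling loop with a timeout.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project", "us-central1" and "my-subnet" are placeholder names.
	op, err := svc.Subnetworks.Delete("my-project", "us-central1", "my-subnet").Do()
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the operation reaches DONE, then check for errors.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.RegionOperations.Get("my-project", "us-central1", op.Name).Do()
		if err != nil {
			log.Fatal(err)
		}
	}
	if op.Error != nil {
		log.Fatalf("delete operation finished with errors: %v", op.Error)
	}
	fmt.Println("subnetwork deleted")
}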
func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113625,7 +122533,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113686,7 +122594,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified subnetwork. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified subnetwork.", // "httpMethod": "DELETE", // "id": "compute.subnetworks.delete", // "parameterOrder": [ @@ -113748,7 +122656,7 @@ type SubnetworksExpandIpCidrRangeCall struct { } // ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a -// specified value. (== suppress_warning http-rest-shadowed ==) +// specified value. func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113804,7 +122712,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113870,7 +122778,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Expands the IP CIDR range of the subnetwork to a specified value. (== suppress_warning http-rest-shadowed ==)", + // "description": "Expands the IP CIDR range of the subnetwork to a specified value.", // "httpMethod": "POST", // "id": "compute.subnetworks.expandIpCidrRange", // "parameterOrder": [ @@ -113935,8 +122843,7 @@ type SubnetworksGetCall struct { } // Get: Returns the specified subnetwork. Gets a list of available -// subnetworks list() request. (== suppress_warning http-rest-shadowed -// ==) +// subnetworks list() request. func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113982,7 +122889,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114046,7 +122953,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } return ret, nil // { - // "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", // "httpMethod": "GET", // "id": "compute.subnetworks.get", // "parameterOrder": [ @@ -114104,8 +123011,7 @@ type SubnetworksGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. (== suppress_warning -// http-rest-shadowed ==) +// empty if no such policy or resource exists. func (r *SubnetworksService) GetIamPolicy(project string, region string, resource string) *SubnetworksGetIamPolicyCall { c := &SubnetworksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114151,7 +123057,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114215,7 +123121,7 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", // "id": "compute.subnetworks.getIamPolicy", // "parameterOrder": [ @@ -114272,7 +123178,7 @@ type SubnetworksInsertCall struct { } // Insert: Creates a subnetwork in the specified project using the data -// included in the request. (== suppress_warning http-rest-shadowed ==) +// included in the request. func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114327,7 +123233,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114392,7 +123298,7 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a subnetwork in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a subnetwork in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.subnetworks.insert", // "parameterOrder": [ @@ -114448,7 +123354,7 @@ type SubnetworksListCall struct { } // List: Retrieves a list of subnetworks available to the specified -// project. (== suppress_warning http-rest-shadowed ==) +// project. func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114460,24 +123366,25 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis // filters resources listed in the response. 
The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -114485,10 +123392,10 @@ func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -114499,12 +123406,13 @@ func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SubnetworksListCall) OrderBy(orderBy string) *SubnetworksListCall { c.urlParams_.Set("orderBy", orderBy) @@ -114512,7 +123420,7 @@ func (c *SubnetworksListCall) OrderBy(orderBy string) *SubnetworksListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { c.urlParams_.Set("pageToken", pageToken) @@ -114556,7 +123464,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114619,7 +123527,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, } return ret, nil // { - // "description": "Retrieves a list of subnetworks available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of subnetworks available to the specified project.", // "httpMethod": "GET", // "id": "compute.subnetworks.list", // "parameterOrder": [ @@ -114628,25 +123536,25 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -114713,7 +123621,6 @@ type SubnetworksListUsableCall struct { // ListUsable: Retrieves an aggregated list of all usable subnetworks in // the project. The list contains all of the subnetworks in the project // and the subnetworks that were shared by a Shared VPC host project. 
-// (== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114724,24 +123631,25 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { c.urlParams_.Set("filter", filter) return c @@ -114749,10 +123657,10 @@ func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsable // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *SubnetworksListUsableCall) MaxResults(maxResults int64) *SubnetworksListUsableCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -114763,12 +123671,13 @@ func (c *SubnetworksListUsableCall) MaxResults(maxResults int64) *SubnetworksLis // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *SubnetworksListUsableCall) OrderBy(orderBy string) *SubnetworksListUsableCall { c.urlParams_.Set("orderBy", orderBy) @@ -114776,7 +123685,7 @@ func (c *SubnetworksListUsableCall) OrderBy(orderBy string) *SubnetworksListUsab } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksListUsableCall { c.urlParams_.Set("pageToken", pageToken) @@ -114820,7 +123729,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114882,7 +123791,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub } return ret, nil // { - // "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", // "httpMethod": "GET", // "id": "compute.subnetworks.listUsable", // "parameterOrder": [ @@ -114890,25 +123799,25 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -114970,8 +123879,7 @@ type SubnetworksPatchCall struct { // Patch: Patches the specified subnetwork with the data included in the // request. Only certain fields can up updated with a patch request as // indicated in the field descriptions. You must specify the current -// fingeprint of the subnetwork resource being patched. (== -// suppress_warning http-rest-shadowed ==) +// fingerprint of the subnetwork resource being patched. func (r *SubnetworksService) Patch(project string, region string, subnetwork string, subnetwork2 *Subnetwork) *SubnetworksPatchCall { c := &SubnetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115042,7 +123950,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115108,7 +124016,7 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingerprint of the subnetwork resource being patched.", // "httpMethod": "PATCH", // "id": "compute.subnetworks.patch", // "parameterOrder": [ @@ -115179,8 +124087,7 @@ type SubnetworksSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. (== suppress_warning -// http-rest-shadowed ==) +// resource. Replaces any existing policy. func (r *SubnetworksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *SubnetworksSetIamPolicyCall { c := &SubnetworksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115217,7 +124124,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115283,7 +124190,7 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", // "id": "compute.subnetworks.setIamPolicy", // "parameterOrder": [ @@ -115344,7 +124251,7 @@ type SubnetworksSetPrivateIpGoogleAccessCall struct { // SetPrivateIpGoogleAccess: Set whether VMs in this subnet can access // Google services without assigning external IP addresses through -// Private Google Access. (== suppress_warning http-rest-shadowed ==) +// Private Google Access. func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region string, subnetwork string, subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest) *SubnetworksSetPrivateIpGoogleAccessCall { c := &SubnetworksSetPrivateIpGoogleAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115400,7 +124307,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115466,7 +124373,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access. (== suppress_warning http-rest-shadowed ==)", + // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", // "httpMethod": "POST", // "id": "compute.subnetworks.setPrivateIpGoogleAccess", // "parameterOrder": [ @@ -115531,7 +124438,7 @@ type SubnetworksTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) +// specified resource. func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115568,7 +124475,7 @@ func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115634,7 +124541,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", // "id": "compute.subnetworks.testIamPermissions", // "parameterOrder": [ @@ -115693,8 +124600,7 @@ type TargetHttpProxiesAggregatedListCall struct { } // AggregatedList: Retrieves the list of all TargetHttpProxy resources, -// regional and global, available to the specified project. (== -// suppress_warning http-rest-shadowed ==) +// regional and global, available to the specified project. 
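Editorial aside, not part of the vendored diff: the Subnetworks.Patch hunk above keeps the requirement that the caller send the current fingerprint of the subnetwork being patched (only the `(== suppress_warning http-rest-shadowed ==)` marker was dropped). A minimal read-modify-write sketch of that flow, assuming the generated surface shown in this patch plus a `Subnetworks.Get` call and `Subnetwork.Fingerprint`/`EnableFlowLogs` fields as found in released compute/v1 clients:

```go
// Illustrative sketch only; Subnetworks.Get and the Subnetwork field names
// are assumptions based on released compute/v1 clients, not on this hunk.
package snippets

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func patchSubnetwork(ctx context.Context, project, region, name string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}

	// Read the current resource first: compute.subnetworks.patch rejects
	// requests that do not carry the subnetwork's current fingerprint.
	current, err := svc.Subnetworks.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		return err
	}

	patch := &compute.Subnetwork{
		Fingerprint:    current.Fingerprint, // required by the patch semantics described above
		EnableFlowLogs: true,                // example field change only
	}

	op, err := svc.Subnetworks.Patch(project, region, name, patch).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("patch submitted, operation: %s", op.Name)
	return nil
}
```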
func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpProxiesAggregatedListCall { c := &TargetHttpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115705,35 +124611,49 @@ func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpPro // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetHttpProxiesAggregatedListCall) Filter(filter string) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetHttpProxiesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetHttpProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -115744,12 +124664,13 @@ func (c *TargetHttpProxiesAggregatedListCall) MaxResults(maxResults int64) *Targ // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetHttpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -115757,7 +124678,7 @@ func (c *TargetHttpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHtt } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -115801,7 +124722,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115863,7 +124784,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project.", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.aggregatedList", // "parameterOrder": [ @@ -115871,25 +124792,30 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -115946,8 +124872,7 @@ type TargetHttpProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetHttpProxy resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetHttpProxy resource. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -116002,7 +124927,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116062,7 +124987,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetHttpProxy resource.", // "httpMethod": "DELETE", // "id": "compute.targetHttpProxies.delete", // "parameterOrder": [ @@ -116115,8 +125040,7 @@ type TargetHttpProxiesGetCall struct { } // Get: Returns the specified TargetHttpProxy resource. Gets a list of -// available target HTTP proxies by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available target HTTP proxies by making a list() request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -116162,7 +125086,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116225,7 +125149,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.get", // "parameterOrder": [ @@ -116273,8 +125197,7 @@ type TargetHttpProxiesInsertCall struct { } // Insert: Creates a TargetHttpProxy resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -116329,7 +125252,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116393,7 +125316,7 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetHttpProxies.insert", // "parameterOrder": [ @@ -116440,7 +125363,7 @@ type TargetHttpProxiesListCall struct { } // List: Retrieves the list of TargetHttpProxy resources available to -// the specified project. (== suppress_warning http-rest-shadowed ==) +// the specified project. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -116452,24 +125375,25 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. 
The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -116477,10 +125401,10 @@ func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesList // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -116491,12 +125415,13 @@ func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProx // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. 
// -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetHttpProxiesListCall) OrderBy(orderBy string) *TargetHttpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -116504,7 +125429,7 @@ func (c *TargetHttpProxiesListCall) OrderBy(orderBy string) *TargetHttpProxiesLi } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -116548,7 +125473,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116610,7 +125535,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.list", // "parameterOrder": [ @@ -116618,25 +125543,25 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -116694,8 +125619,7 @@ type TargetHttpProxiesSetUrlMapCall struct { header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetUrlMap: Changes the URL map for TargetHttpProxy. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -116751,7 +125675,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116816,7 +125740,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the URL map for TargetHttpProxy.", // "httpMethod": "POST", // "id": "compute.targetHttpProxies.setUrlMap", // "parameterOrder": [ @@ -116871,8 +125795,7 @@ type TargetHttpsProxiesAggregatedListCall struct { } // AggregatedList: Retrieves the list of all TargetHttpsProxy resources, -// regional and global, available to the specified project. (== -// suppress_warning http-rest-shadowed ==) +// regional and global, available to the specified project. func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsProxiesAggregatedListCall { c := &TargetHttpsProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116883,35 +125806,49 @@ func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsP // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. 
However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetHttpsProxiesAggregatedListCall) Filter(filter string) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetHttpsProxiesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetHttpsProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -116922,12 +125859,13 @@ func (c *TargetHttpsProxiesAggregatedListCall) MaxResults(maxResults int64) *Tar // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetHttpsProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -116935,7 +125873,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHt } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
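Editorial aside, not part of the vendored diff: the new `IncludeAllScopes` parameter and the backtick-quoted filter syntax documented in this hunk compose with the existing list builders. A minimal sketch against the generated methods shown above; `NewService`, `Context`, and the aggregated response's `Items`/`NextPageToken` fields are assumed from released compute/v1 clients:

```go
package snippets

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listAllTargetHttpsProxies exercises the aggregatedList builders touched by
// this hunk: Filter, IncludeAllScopes, MaxResults and OrderBy.
func listAllTargetHttpsProxies(ctx context.Context, project string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}

	resp, err := svc.TargetHttpsProxies.AggregatedList(project).
		Filter(`name != example-proxy`).  // filter syntax as documented above
		IncludeAllScopes(true).           // new optional parameter in this revision
		MaxResults(250).                  // acceptable values are 0 to 500, default 500
		OrderBy("creationTimestamp desc").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}

	// Aggregated responses are keyed by scope (assumed field layout).
	for scope := range resp.Items {
		log.Printf("scope with results: %s", scope)
	}
	if resp.NextPageToken != "" {
		log.Println("more results available; pass NextPageToken back via PageToken")
	}
	return nil
}
```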
func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -116979,7 +125917,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117041,7 +125979,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project.", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.aggregatedList", // "parameterOrder": [ @@ -117049,25 +125987,30 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -117124,8 +126067,7 @@ type TargetHttpsProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetHttpsProxy resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetHttpsProxy resource. 
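Editorial aside, not part of the vendored diff: the `pageToken`/`nextPageToken` contract spelled out in these descriptions is identical across the list and aggregatedList methods in this series of hunks. A minimal pagination sketch against the plain `TargetHttpProxies.List` call documented earlier; the `TargetHttpProxyList.Items` and `NextPageToken` fields are assumed from released compute/v1 clients:

```go
package snippets

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listTargetHttpProxies walks every page by feeding each NextPageToken back
// in as the next request's PageToken, as the parameter docs describe.
func listTargetHttpProxies(ctx context.Context, project string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}

	pageToken := ""
	for {
		call := svc.TargetHttpProxies.List(project).MaxResults(500).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return err
		}
		for _, proxy := range resp.Items {
			log.Printf("target HTTP proxy: %s", proxy.Name)
		}
		if resp.NextPageToken == "" {
			return nil // last page
		}
		pageToken = resp.NextPageToken
	}
}
```

The generated call types also expose a `Pages` helper that performs this loop internally; the manual form above simply mirrors the parameter documentation in the hunks.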
func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117179,7 +126121,7 @@ func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117239,7 +126181,7 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetHttpsProxy resource.", // "httpMethod": "DELETE", // "id": "compute.targetHttpsProxies.delete", // "parameterOrder": [ @@ -117292,8 +126234,7 @@ type TargetHttpsProxiesGetCall struct { } // Get: Returns the specified TargetHttpsProxy resource. Gets a list of -// available target HTTPS proxies by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available target HTTPS proxies by making a list() request. func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117338,7 +126279,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117401,7 +126342,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.get", // "parameterOrder": [ @@ -117449,8 +126390,7 @@ type TargetHttpsProxiesInsertCall struct { } // Insert: Creates a TargetHttpsProxy resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. 
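Editorial aside, not part of the vendored diff: Insert, as documented above, takes the proxy definition in the request body. A minimal creation sketch; the `TargetHttpsProxy` field names (`Name`, `UrlMap`, `SslCertificates`) are assumed from released compute/v1 clients, and the name and URLs are placeholders:

```go
package snippets

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// insertTargetHttpsProxy creates a TargetHttpsProxy from the data included
// in the request, as compute.targetHttpsProxies.insert expects.
func insertTargetHttpsProxy(ctx context.Context, project, urlMapURL, certURL string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}

	proxy := &compute.TargetHttpsProxy{
		Name:            "example-https-proxy", // placeholder name
		UrlMap:          urlMapURL,             // self link of an existing URL map
		SslCertificates: []string{certURL},     // self links of existing SSL certificates
	}

	op, err := svc.TargetHttpsProxies.Insert(project, proxy).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("insert submitted, operation: %s", op.Name)
	return nil
}
```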
func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117504,7 +126444,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117568,7 +126508,7 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.insert", // "parameterOrder": [ @@ -117615,7 +126555,7 @@ type TargetHttpsProxiesListCall struct { } // List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project. (== suppress_warning http-rest-shadowed ==) +// the specified project. func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117626,24 +126566,25 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -117651,10 +126592,10 @@ func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesLi // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -117665,12 +126606,13 @@ func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsPr // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetHttpsProxiesListCall) OrderBy(orderBy string) *TargetHttpsProxiesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -117678,7 +126620,7 @@ func (c *TargetHttpsProxiesListCall) OrderBy(orderBy string) *TargetHttpsProxies } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -117722,7 +126664,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117784,7 +126726,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.list", // "parameterOrder": [ @@ -117792,25 +126734,25 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -117869,7 +126811,6 @@ type TargetHttpsProxiesSetQuicOverrideCall struct { } // SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. -// (== suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117924,7 +126865,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117989,7 +126930,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the QUIC override policy for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the QUIC override policy for TargetHttpsProxy.", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setQuicOverride", // "parameterOrder": [ @@ -118044,7 +126985,6 @@ type TargetHttpsProxiesSetSslCertificatesCall struct { } // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. 
-// (== suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118099,7 +127039,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118164,7 +127104,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Replaces SslCertificates for TargetHttpsProxy.", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setSslCertificates", // "parameterOrder": [ @@ -118223,7 +127163,7 @@ type TargetHttpsProxiesSetSslPolicyCall struct { // policy specifies the server-side support for SSL features. This // affects connections between clients and the HTTPS proxy load // balancer. They do not affect the connection between the load balancer -// and the backends. (== suppress_warning http-rest-shadowed ==) +// and the backends. func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118278,7 +127218,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118343,7 +127283,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setSslPolicy", // "parameterOrder": [ @@ -118397,8 +127337,7 @@ type TargetHttpsProxiesSetUrlMapCall struct { header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetUrlMap: Changes the URL map for TargetHttpsProxy. 
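Reviewer note (not part of the vendored file): the TargetHttpsProxies setters touched in this hunk (SetQuicOverride, SetSslCertificates, SetSslPolicy, SetUrlMap) keep their signatures; only the doc strings and the x-goog-api-client header change. A minimal sketch of driving one of them through the vendored client follows; `compute.NewService` is assumed to be the constructor in use, and the project, proxy, and URL map names are placeholders.

```
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// swapUrlMap is an illustrative sketch, not upstream code: it points an HTTPS
// proxy at a different URL map using the SetUrlMap method shown above.
func swapUrlMap(ctx context.Context) error {
	svc, err := compute.NewService(ctx) // assumed constructor; uses default credentials
	if err != nil {
		return err
	}
	ref := &compute.UrlMapReference{
		UrlMap: "projects/my-project/global/urlMaps/my-url-map", // placeholder
	}
	op, err := svc.TargetHttpsProxies.SetUrlMap("my-project", "my-https-proxy", ref).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// The returned global Operation is normally polled until Status == "DONE".
	log.Printf("setUrlMap operation %s: %s", op.Name, op.Status)
	return nil
}
```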
func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118453,7 +127392,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118518,7 +127457,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the URL map for TargetHttpsProxy.", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setUrlMap", // "parameterOrder": [ @@ -118572,8 +127511,7 @@ type TargetInstancesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target instances. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of target instances. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -118585,35 +127523,49 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetInstancesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -118624,12 +127576,13 @@ func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *Target // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -118637,7 +127590,7 @@ func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInsta } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
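Reviewer note (illustrative, not part of the vendored file): the hunk above adds the `IncludeAllScopes` setter and rewords the `filter`, `maxResults`, `orderBy`, and `pageToken` docs for the aggregated list call. The sketch below shows how those options compose on one request; the project name is a placeholder and the response field names (`Items`, `TargetInstances`) are assumed from the generated types rather than quoted from this hunk.

```
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listTargetInstancesAcrossScopes sketches an aggregated list using the
// parameters documented above, including the new IncludeAllScopes option.
func listTargetInstancesAcrossScopes(ctx context.Context, svc *compute.Service) error {
	call := svc.TargetInstances.AggregatedList("my-project").
		Filter(`name != example-instance`).
		IncludeAllScopes(true).
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	resp, err := call.Context(ctx).Do()
	if err != nil {
		return err
	}
	// Results come back grouped per scope (zone), keyed by scope name.
	for scope, scoped := range resp.Items {
		for _, ti := range scoped.TargetInstances {
			log.Printf("%s: %s", scope, ti.Name)
		}
	}
	return nil
}
```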
func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -118681,7 +127634,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118743,7 +127696,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Retrieves an aggregated list of target instances. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of target instances.", // "httpMethod": "GET", // "id": "compute.targetInstances.aggregatedList", // "parameterOrder": [ @@ -118751,25 +127704,30 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -118827,8 +127785,7 @@ type TargetInstancesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetInstance resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetInstance resource. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -118884,7 +127841,7 @@ func (c *TargetInstancesDeleteCall) Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118945,7 +127902,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified TargetInstance resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetInstance resource.", // "httpMethod": "DELETE", // "id": "compute.targetInstances.delete", // "parameterOrder": [ @@ -119007,8 +127964,7 @@ type TargetInstancesGetCall struct { } // Get: Returns the specified TargetInstance resource. Gets a list of -// available target instances by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available target instances by making a list() request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119055,7 +128011,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119119,7 +128075,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } return ret, nil // { - // "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetInstances.get", // "parameterOrder": [ @@ -119176,8 +128132,7 @@ type TargetInstancesInsertCall struct { } // Insert: Creates a TargetInstance resource in the specified project -// and zone using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// and zone using the data included in the request. 
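Reviewer note (illustrative, not part of the vendored file): the Insert call whose doc comment is trimmed above takes a `TargetInstance` body. A hedged sketch follows; the `Name`, `Instance`, and `NatPolicy` field names are assumed from the generated schema, and the project, zone, and instance URL are placeholders.

```
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// createTargetInstance sketches TargetInstances.Insert as documented above.
func createTargetInstance(ctx context.Context, svc *compute.Service) error {
	ti := &compute.TargetInstance{
		Name:      "example-target-instance",
		Instance:  "projects/my-project/zones/us-central1-a/instances/example-instance",
		NatPolicy: "NO_NAT",
	}
	op, err := svc.TargetInstances.Insert("my-project", "us-central1-a", ti).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// The returned zonal Operation is normally polled until Status == "DONE".
	log.Printf("insert operation %s: %s", op.Name, op.Status)
	return nil
}
```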
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119233,7 +128188,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119298,7 +128253,7 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetInstances.insert", // "parameterOrder": [ @@ -119354,8 +128309,7 @@ type TargetInstancesListCall struct { } // List: Retrieves a list of TargetInstance resources available to the -// specified project and zone. (== suppress_warning http-rest-shadowed -// ==) +// specified project and zone. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119368,24 +128322,25 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -119393,10 +128348,10 @@ func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -119407,12 +128362,13 @@ func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesL // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -119420,7 +128376,7 @@ func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCa } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -119464,7 +128420,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119527,7 +128483,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta } return ret, nil // { - // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", // "httpMethod": "GET", // "id": "compute.targetInstances.list", // "parameterOrder": [ @@ -119536,25 +128492,25 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -119620,8 +128576,7 @@ type TargetPoolsAddHealthCheckCall struct { header_ http.Header } -// AddHealthCheck: Adds health check URLs to a target pool. (== -// suppress_warning http-rest-shadowed ==) +// AddHealthCheck: Adds health check URLs to a target pool. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119678,7 +128633,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119744,7 +128699,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Adds health check URLs to a target pool. (== suppress_warning http-rest-shadowed ==)", + // "description": "Adds health check URLs to a target pool.", // "httpMethod": "POST", // "id": "compute.targetPools.addHealthCheck", // "parameterOrder": [ @@ -119808,8 +128763,7 @@ type TargetPoolsAddInstanceCall struct { header_ http.Header } -// AddInstance: Adds an instance to a target pool. (== suppress_warning -// http-rest-shadowed ==) +// AddInstance: Adds an instance to a target pool. 
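Reviewer note (illustrative, not part of the vendored file): the AddHealthCheck and AddInstance methods touched above each take a small request body naming the resources to attach. The sketch below shows both; the request and reference field names are assumed from the generated types, and all resource URLs are placeholders.

```
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// attachToTargetPool sketches the AddHealthCheck and AddInstance calls above.
func attachToTargetPool(ctx context.Context, svc *compute.Service) error {
	project, region, pool := "my-project", "us-central1", "my-target-pool"

	// Attach an HTTP health check to the pool.
	hcReq := &compute.TargetPoolsAddHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{
			{HealthCheck: "projects/my-project/global/httpHealthChecks/my-health-check"},
		},
	}
	if _, err := svc.TargetPools.AddHealthCheck(project, region, pool, hcReq).
		Context(ctx).Do(); err != nil {
		return err
	}

	// Add a backend instance to the pool.
	instReq := &compute.TargetPoolsAddInstanceRequest{
		Instances: []*compute.InstanceReference{
			{Instance: "projects/my-project/zones/us-central1-a/instances/example-instance"},
		},
	}
	_, err := svc.TargetPools.AddInstance(project, region, pool, instReq).
		Context(ctx).Do()
	return err
}
```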
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119866,7 +128820,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119932,7 +128886,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Adds an instance to a target pool. (== suppress_warning http-rest-shadowed ==)", + // "description": "Adds an instance to a target pool.", // "httpMethod": "POST", // "id": "compute.targetPools.addInstance", // "parameterOrder": [ @@ -119994,8 +128948,7 @@ type TargetPoolsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target pools. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of target pools. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120007,35 +128960,49 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. 
However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetPoolsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -120046,12 +129013,13 @@ func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPool // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetPoolsAggregatedListCall) OrderBy(orderBy string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -120059,7 +129027,7 @@ func (c *TargetPoolsAggregatedListCall) OrderBy(orderBy string) *TargetPoolsAggr } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
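Reviewer note (illustrative, not part of the vendored file): callers rarely set `pageToken` by hand for the aggregated list documented above, because the generated `Pages` helper follows `nextPageToken` automatically. A hedged sketch follows; the `Pages` method and the `TargetPoolAggregatedList`, `Items`, and `TargetPools` names are assumed from the generated code, and the project name is a placeholder.

```
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listAllTargetPools sketches paging through an aggregated list with the
// maxResults and includeAllScopes parameters documented above.
func listAllTargetPools(ctx context.Context, svc *compute.Service) error {
	call := svc.TargetPools.AggregatedList("my-project").
		IncludeAllScopes(true).
		MaxResults(250)
	return call.Pages(ctx, func(page *compute.TargetPoolAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, tp := range scoped.TargetPools {
				log.Printf("%s: %s", scope, tp.Name)
			}
		}
		// Returning a non-nil error here stops pagination early.
		return nil
	})
}
```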
func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -120103,7 +129071,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120165,7 +129133,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe } return ret, nil // { - // "description": "Retrieves an aggregated list of target pools. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of target pools.", // "httpMethod": "GET", // "id": "compute.targetPools.aggregatedList", // "parameterOrder": [ @@ -120173,25 +129141,30 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -120249,8 +129222,7 @@ type TargetPoolsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified target pool. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified target pool. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120306,7 +129278,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120367,7 +129339,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified target pool. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified target pool.", // "httpMethod": "DELETE", // "id": "compute.targetPools.delete", // "parameterOrder": [ @@ -120429,8 +129401,7 @@ type TargetPoolsGetCall struct { } // Get: Returns the specified target pool. Gets a list of available -// target pools by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// target pools by making a list() request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120477,7 +129448,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120541,7 +129512,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro } return ret, nil // { - // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetPools.get", // "parameterOrder": [ @@ -120599,8 +129570,7 @@ type TargetPoolsGetHealthCall struct { } // GetHealth: Gets the most recent health check results for each IP for -// the instance that is referenced by the given target pool. (== -// suppress_warning http-rest-shadowed ==) +// the instance that is referenced by the given target pool. 
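Reviewer note (illustrative, not part of the vendored file): the GetHealth method whose description is trimmed above takes an `InstanceReference` body and reports one health entry per IP. A hedged sketch follows; the response type and its `HealthStatus`, `Instance`, `IpAddress`, and `HealthState` fields are assumed from the generated schema, and the resource names are placeholders.

```
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// checkPoolHealth sketches TargetPools.GetHealth as documented above.
func checkPoolHealth(ctx context.Context, svc *compute.Service) error {
	ref := &compute.InstanceReference{
		Instance: "projects/my-project/zones/us-central1-a/instances/example-instance",
	}
	health, err := svc.TargetPools.GetHealth("my-project", "us-central1", "my-target-pool", ref).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, hs := range health.HealthStatus {
		log.Printf("%s (%s): %s", hs.Instance, hs.IpAddress, hs.HealthState)
	}
	return nil
}
```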
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120638,7 +129608,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120704,7 +129674,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool } return ret, nil // { - // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool. (== suppress_warning http-rest-shadowed ==)", + // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", // "httpMethod": "POST", // "id": "compute.targetPools.getHealth", // "parameterOrder": [ @@ -120764,8 +129734,7 @@ type TargetPoolsInsertCall struct { } // Insert: Creates a target pool in the specified project and region -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120821,7 +129790,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120886,7 +129855,7 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a target pool in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a target pool in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetPools.insert", // "parameterOrder": [ @@ -120942,7 +129911,7 @@ type TargetPoolsListCall struct { } // List: Retrieves a list of target pools available to the specified -// project and region. (== suppress_warning http-rest-shadowed ==) +// project and region. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120955,24 +129924,25 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. 
The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c @@ -120980,10 +129950,10 @@ func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -120994,12 +129964,13 @@ func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. 
// -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -121007,7 +129978,7 @@ func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -121051,7 +130022,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121114,7 +130085,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, } return ret, nil // { - // "description": "Retrieves a list of target pools available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of target pools available to the specified project and region.", // "httpMethod": "GET", // "id": "compute.targetPools.list", // "parameterOrder": [ @@ -121123,25 +130094,25 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -121207,8 +130178,7 @@ type TargetPoolsRemoveHealthCheckCall struct { header_ http.Header } -// RemoveHealthCheck: Removes health check URL from a target pool. (== -// suppress_warning http-rest-shadowed ==) +// RemoveHealthCheck: Removes health check URL from a target pool. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -121265,7 +130235,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121331,7 +130301,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Removes health check URL from a target pool. (== suppress_warning http-rest-shadowed ==)", + // "description": "Removes health check URL from a target pool.", // "httpMethod": "POST", // "id": "compute.targetPools.removeHealthCheck", // "parameterOrder": [ @@ -121395,8 +130365,7 @@ type TargetPoolsRemoveInstanceCall struct { header_ http.Header } -// RemoveInstance: Removes instance URL from a target pool. (== -// suppress_warning http-rest-shadowed ==) +// RemoveInstance: Removes instance URL from a target pool. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -121453,7 +130422,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121519,7 +130488,7 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Removes instance URL from a target pool. (== suppress_warning http-rest-shadowed ==)", + // "description": "Removes instance URL from a target pool.", // "httpMethod": "POST", // "id": "compute.targetPools.removeInstance", // "parameterOrder": [ @@ -121583,8 +130552,7 @@ type TargetPoolsSetBackupCall struct { header_ http.Header } -// SetBackup: Changes a backup target pool's configurations. (== -// suppress_warning http-rest-shadowed ==) +// SetBackup: Changes a backup target pool's configurations. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -121648,7 +130616,7 @@ func (c *TargetPoolsSetBackupCall) Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121714,7 +130682,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Changes a backup target pool's configurations. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes a backup target pool's configurations.", // "httpMethod": "POST", // "id": "compute.targetPools.setBackup", // "parameterOrder": [ @@ -121782,8 +130750,7 @@ type TargetSslProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetSslProxy resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetSslProxy resource. func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121837,7 +130804,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121897,7 +130864,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified TargetSslProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetSslProxy resource.", // "httpMethod": "DELETE", // "id": "compute.targetSslProxies.delete", // "parameterOrder": [ @@ -121950,8 +130917,7 @@ type TargetSslProxiesGetCall struct { } // Get: Returns the specified TargetSslProxy resource. Gets a list of -// available target SSL proxies by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available target SSL proxies by making a list() request. func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121996,7 +130962,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122059,7 +131025,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr } return ret, nil // { - // "description": "Returns the specified TargetSslProxy resource. 
Gets a list of available target SSL proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetSslProxies.get", // "parameterOrder": [ @@ -122107,8 +131073,7 @@ type TargetSslProxiesInsertCall struct { } // Insert: Creates a TargetSslProxy resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122162,7 +131127,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122226,7 +131191,7 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetSslProxies.insert", // "parameterOrder": [ @@ -122273,7 +131238,7 @@ type TargetSslProxiesListCall struct { } // List: Retrieves the list of TargetSslProxy resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122284,24 +131249,25 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -122309,10 +131275,10 @@ func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCa // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetSslProxiesListCall) MaxResults(maxResults int64) *TargetSslProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -122323,12 +131289,13 @@ func (c *TargetSslProxiesListCall) MaxResults(maxResults int64) *TargetSslProxie // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetSslProxiesListCall) OrderBy(orderBy string) *TargetSslProxiesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -122336,7 +131303,7 @@ func (c *TargetSslProxiesListCall) OrderBy(orderBy string) *TargetSslProxiesList } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
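// Illustrative sketch (not part of the generated client): one way a caller
// might combine the Filter, MaxResults, OrderBy and PageToken parameters
// documented above. It assumes an already-constructed *compute.Service (svc),
// a project ID, and imports of context, fmt and
// google.golang.org/api/compute/v1; the generated Pages helper feeds each
// nextPageToken back in as pageToken until the listing is exhausted.
func listTargetSslProxies(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.TargetSslProxies.List(project).
		Filter(`name != "example-proxy"`). // hypothetical filter expression
		MaxResults(100).                   // page size; acceptable values are 0-500
		OrderBy("creationTimestamp desc")  // newest resources first
	return call.Pages(ctx, func(page *compute.TargetSslProxyList) error {
		for _, proxy := range page.Items {
			fmt.Println(proxy.Name)
		}
		return nil
	})
}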
func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxiesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -122380,7 +131347,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122442,7 +131409,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP } return ret, nil // { - // "description": "Retrieves the list of TargetSslProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.targetSslProxies.list", // "parameterOrder": [ @@ -122450,25 +131417,25 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -122526,8 +131493,7 @@ type TargetSslProxiesSetBackendServiceCall struct { header_ http.Header } -// SetBackendService: Changes the BackendService for TargetSslProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetBackendService: Changes the BackendService for TargetSslProxy. 
func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122582,7 +131548,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122647,7 +131613,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the BackendService for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the BackendService for TargetSslProxy.", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setBackendService", // "parameterOrder": [ @@ -122702,8 +131668,7 @@ type TargetSslProxiesSetProxyHeaderCall struct { header_ http.Header } -// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122758,7 +131723,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122823,7 +131788,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes the ProxyHeaderType for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the ProxyHeaderType for TargetSslProxy.", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setProxyHeader", // "parameterOrder": [ @@ -122878,8 +131843,7 @@ type TargetSslProxiesSetSslCertificatesCall struct { header_ http.Header } -// SetSslCertificates: Changes SslCertificates for TargetSslProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetSslCertificates: Changes SslCertificates for TargetSslProxy. 
func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122934,7 +131898,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122999,7 +131963,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Changes SslCertificates for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes SslCertificates for TargetSslProxy.", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslCertificates", // "parameterOrder": [ @@ -123058,7 +132022,6 @@ type TargetSslProxiesSetSslPolicyCall struct { // specifies the server-side support for SSL features. This affects // connections between clients and the SSL proxy load balancer. They do // not affect the connection between the load balancer and the backends. -// (== suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123113,7 +132076,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123178,7 +132141,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", // "parameterOrder": [ @@ -123231,8 +132194,7 @@ type TargetTcpProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetTcpProxy resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified TargetTcpProxy resource. 
func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) *TargetTcpProxiesDeleteCall { c := &TargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123286,7 +132248,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123346,7 +132308,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified TargetTcpProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified TargetTcpProxy resource.", // "httpMethod": "DELETE", // "id": "compute.targetTcpProxies.delete", // "parameterOrder": [ @@ -123399,8 +132361,7 @@ type TargetTcpProxiesGetCall struct { } // Get: Returns the specified TargetTcpProxy resource. Gets a list of -// available target TCP proxies by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available target TCP proxies by making a list() request. func (r *TargetTcpProxiesService) Get(project string, targetTcpProxy string) *TargetTcpProxiesGetCall { c := &TargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123445,7 +132406,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123508,7 +132469,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr } return ret, nil // { - // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetTcpProxies.get", // "parameterOrder": [ @@ -123556,8 +132517,7 @@ type TargetTcpProxiesInsertCall struct { } // Insert: Creates a TargetTcpProxy resource in the specified project -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. 
func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetTcpProxy) *TargetTcpProxiesInsertCall { c := &TargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123611,7 +132571,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123675,7 +132635,7 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetTcpProxies.insert", // "parameterOrder": [ @@ -123722,7 +132682,7 @@ type TargetTcpProxiesListCall struct { } // List: Retrieves the list of TargetTcpProxy resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall { c := &TargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123733,24 +132693,25 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -123758,10 +132719,10 @@ func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCa // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetTcpProxiesListCall) MaxResults(maxResults int64) *TargetTcpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -123772,12 +132733,13 @@ func (c *TargetTcpProxiesListCall) MaxResults(maxResults int64) *TargetTcpProxie // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetTcpProxiesListCall) OrderBy(orderBy string) *TargetTcpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -123785,7 +132747,7 @@ func (c *TargetTcpProxiesListCall) OrderBy(orderBy string) *TargetTcpProxiesList } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -123829,7 +132791,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123891,7 +132853,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP } return ret, nil // { - // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.targetTcpProxies.list", // "parameterOrder": [ @@ -123899,25 +132861,25 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -123975,8 +132937,7 @@ type TargetTcpProxiesSetBackendServiceCall struct { header_ http.Header } -// SetBackendService: Changes the BackendService for TargetTcpProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetBackendService: Changes the BackendService for TargetTcpProxy. func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124031,7 +132992,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124096,7 +133057,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the BackendService for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the BackendService for TargetTcpProxy.", // "httpMethod": "POST", // "id": "compute.targetTcpProxies.setBackendService", // "parameterOrder": [ @@ -124151,8 +133112,7 @@ type TargetTcpProxiesSetProxyHeaderCall struct { header_ http.Header } -// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. (== -// suppress_warning http-rest-shadowed ==) +// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. 
func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124207,7 +133167,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124272,7 +133232,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes the ProxyHeaderType for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", + // "description": "Changes the ProxyHeaderType for TargetTcpProxy.", // "httpMethod": "POST", // "id": "compute.targetTcpProxies.setProxyHeader", // "parameterOrder": [ @@ -124327,7 +133287,6 @@ type TargetVpnGatewaysAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of target VPN gateways. -// (== suppress_warning http-rest-shadowed ==) func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124338,35 +133297,49 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetVpnGatewaysAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -124377,12 +133350,13 @@ func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *Targ // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -124390,7 +133364,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpn } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
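// Illustrative sketch (not part of the generated client): exercising the new
// IncludeAllScopes parameter on the aggregated-list call documented above.
// svc, project and the imports are assumed as in the earlier sketch; the
// response field names (Items, TargetVpnGateways) follow the generated
// aggregated-list types. A single Do() fetches only the first page.
func listAllTargetVpnGateways(ctx context.Context, svc *compute.Service, project string) error {
	agg, err := svc.TargetVpnGateways.AggregatedList(project).
		IncludeAllScopes(true). // include every visible scope, even scope types where the resource is not expected
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		fmt.Printf("%s: %d gateway(s)\n", scope, len(scoped.TargetVpnGateways))
	}
	return nil
}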
func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -124434,7 +133408,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124496,7 +133470,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of target VPN gateways. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of target VPN gateways.", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.aggregatedList", // "parameterOrder": [ @@ -124504,25 +133478,30 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -124580,8 +133559,7 @@ type TargetVpnGatewaysDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified target VPN gateway. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified target VPN gateway. 
func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124636,7 +133614,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124697,7 +133675,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified target VPN gateway. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified target VPN gateway.", // "httpMethod": "DELETE", // "id": "compute.targetVpnGateways.delete", // "parameterOrder": [ @@ -124759,8 +133737,7 @@ type TargetVpnGatewaysGetCall struct { } // Get: Returns the specified target VPN gateway. Gets a list of -// available target VPN gateways by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available target VPN gateways by making a list() request. func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124806,7 +133783,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124870,7 +133847,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG } return ret, nil // { - // "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.get", // "parameterOrder": [ @@ -124927,8 +133904,7 @@ type TargetVpnGatewaysInsertCall struct { } // Insert: Creates a target VPN gateway in the specified project and -// region using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// region using the data included in the request. 
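As with the Delete call above, Insert (whose body follows this sketch) returns a regional Operation rather than the created resource. A hedged sketch of creating a target VPN gateway, assuming the generated compute/v1 types (TargetVpnGateway with Name and Network, Context/Do on the call); the project, region, and network values are placeholders, and real code would poll the returned operation until it reports DONE.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder resource definition.
	gw := &compute.TargetVpnGateway{
		Name:    "example-gateway",
		Network: "global/networks/default",
	}

	// Insert starts the create and hands back an Operation to poll
	// (for example via the RegionOperations service) until Status == "DONE".
	op, err := svc.TargetVpnGateways.Insert("my-project", "us-central1", gw).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name, op.Status)
}
```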
func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124983,7 +133959,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125048,7 +134024,7 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.targetVpnGateways.insert", // "parameterOrder": [ @@ -125104,8 +134080,7 @@ type TargetVpnGatewaysListCall struct { } // List: Retrieves a list of target VPN gateways available to the -// specified project and region. (== suppress_warning http-rest-shadowed -// ==) +// specified project and region. func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125117,24 +134092,25 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -125142,10 +134118,10 @@ func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysList // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -125156,12 +134132,13 @@ func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatew // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) @@ -125169,7 +134146,7 @@ func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysLi } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) @@ -125213,7 +134190,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125276,7 +134253,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn } return ret, nil // { - // "description": "Retrieves a list of target VPN gateways available to the specified project and region. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of target VPN gateways available to the specified project and region.", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.list", // "parameterOrder": [ @@ -125285,25 +134262,25 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -125368,8 +134345,7 @@ type UrlMapsAggregatedListCall struct { } // AggregatedList: Retrieves the list of all UrlMap resources, regional -// and global, available to the specified project. (== suppress_warning -// http-rest-shadowed ==) +// and global, available to the specified project. func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCall { c := &UrlMapsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125380,35 +134356,49 @@ func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCa // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *UrlMapsAggregatedListCall) Filter(filter string) *UrlMapsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *UrlMapsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *UrlMapsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *UrlMapsAggregatedListCall) MaxResults(maxResults int64) *UrlMapsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -125419,12 +134409,13 @@ func (c *UrlMapsAggregatedListCall) MaxResults(maxResults int64) *UrlMapsAggrega // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *UrlMapsAggregatedListCall) OrderBy(orderBy string) *UrlMapsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -125432,7 +134423,7 @@ func (c *UrlMapsAggregatedListCall) OrderBy(orderBy string) *UrlMapsAggregatedLi } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -125476,7 +134467,7 @@ func (c *UrlMapsAggregatedListCall) Header() http.Header { func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125538,7 +134529,7 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg } return ret, nil // { - // "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project.", // "httpMethod": "GET", // "id": "compute.urlMaps.aggregatedList", // "parameterOrder": [ @@ -125546,25 +134537,30 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -125621,8 +134617,7 @@ type UrlMapsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified UrlMap resource. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified UrlMap resource. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -125677,7 +134672,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125737,7 +134732,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified UrlMap resource.", // "httpMethod": "DELETE", // "id": "compute.urlMaps.delete", // "parameterOrder": [ @@ -125790,8 +134785,7 @@ type UrlMapsGetCall struct { } // Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// URL maps by making a list() request. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -125837,7 +134831,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125900,7 +134894,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", // "httpMethod": "GET", // "id": "compute.urlMaps.get", // "parameterOrder": [ @@ -125948,8 +134942,7 @@ type UrlMapsInsertCall struct { } // Insert: Creates a UrlMap resource in the specified project using the -// data included in the request. (== suppress_warning http-rest-shadowed -// ==) +// data included in the request. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -126004,7 +134997,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126068,7 +135061,7 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a UrlMap resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", // "httpMethod": "POST", // "id": "compute.urlMaps.insert", // "parameterOrder": [ @@ -126116,8 +135109,7 @@ type UrlMapsInvalidateCacheCall struct { } // InvalidateCache: Initiates a cache invalidation operation, -// invalidating the specified path, scoped to the specified UrlMap. (== -// suppress_warning http-rest-shadowed ==) +// invalidating the specified path, scoped to the specified UrlMap. func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126172,7 +135164,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126237,7 +135229,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap. (== suppress_warning http-rest-shadowed ==)", + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", // "httpMethod": "POST", // "id": "compute.urlMaps.invalidateCache", // "parameterOrder": [ @@ -126292,7 +135284,7 @@ type UrlMapsListCall struct { } // List: Retrieves the list of UrlMap resources available to the -// specified project. (== suppress_warning http-rest-shadowed ==) +// specified project. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list func (r *UrlMapsService) List(project string) *UrlMapsListCall { c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -126304,24 +135296,25 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -126329,10 +135322,10 @@ func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -126343,12 +135336,13 @@ func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -126356,7 +135350,7 @@ func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
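The pageToken/nextPageToken contract described above (the PageToken setter itself follows this sketch) can also be driven by hand instead of through the generated Pages helper. A sketch under the assumption that UrlMapList exposes Items and NextPageToken as in the generated compute/v1 package; the project ID is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Manual pagination: feed each NextPageToken back in via PageToken
	// until the server stops returning one.
	pageToken := ""
	for {
		call := svc.UrlMaps.List("my-project").MaxResults(50).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		list, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, um := range list.Items {
			fmt.Println(um.Name, um.DefaultService)
		}
		if list.NextPageToken == "" {
			break
		}
		pageToken = list.NextPageToken
	}
}
```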
func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -126400,7 +135394,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126462,7 +135456,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) } return ret, nil // { - // "description": "Retrieves the list of UrlMap resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of UrlMap resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.urlMaps.list", // "parameterOrder": [ @@ -126470,25 +135464,25 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -126548,8 +135542,7 @@ type UrlMapsPatchCall struct { // Patch: Patches the specified UrlMap resource with the data included // in the request. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. (== suppress_warning -// http-rest-shadowed ==) +// JSON merge patch format and processing rules. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -126605,7 +135598,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126670,7 +135663,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.urlMaps.patch", // "parameterOrder": [ @@ -126726,7 +135719,7 @@ type UrlMapsUpdateCall struct { } // Update: Updates the specified UrlMap resource with the data included -// in the request. (== suppress_warning http-rest-shadowed ==) +// in the request. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -126782,7 +135775,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126847,7 +135840,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Updates the specified UrlMap resource with the data included in the request.", // "httpMethod": "PUT", // "id": "compute.urlMaps.update", // "parameterOrder": [ @@ -126904,7 +135897,7 @@ type UrlMapsValidateCall struct { // Validate: Runs static validation for the UrlMap. In particular, the // tests of the provided UrlMap will be run. Calling this method does -// NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==) +// NOT create the UrlMap. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -126941,7 +135934,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127006,7 +135999,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate } return ret, nil // { - // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==)", + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", // "httpMethod": "POST", // "id": "compute.urlMaps.validate", // "parameterOrder": [ @@ -127055,8 +136048,7 @@ type VpnGatewaysAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of VPN gateways. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of VPN gateways. func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregatedListCall { c := &VpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127067,35 +136059,49 @@ func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregat // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *VpnGatewaysAggregatedListCall) Filter(filter string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *VpnGatewaysAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *VpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -127106,12 +136112,13 @@ func (c *VpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *VpnGateway // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *VpnGatewaysAggregatedListCall) OrderBy(orderBy string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -127119,7 +136126,7 @@ func (c *VpnGatewaysAggregatedListCall) OrderBy(orderBy string) *VpnGatewaysAggr } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
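includeAllScopes, added to the aggregated-list calls in this change, widens the response to every visible zone/region/global scope instead of only the scopes where the resource type is expected; the PageToken setter documented just above continues after this sketch. A single-page sketch with the flag set, assuming VpnGatewayAggregatedList.Items maps scope names to VpnGatewaysScopedList values as in the generated compute/v1 package; the project ID is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// One page with every scope included; empty scopes simply carry no gateways.
	resp, err := svc.VpnGateways.AggregatedList("my-project").
		IncludeAllScopes(true).
		MaxResults(250).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for scope, scoped := range resp.Items {
		for _, gw := range scoped.VpnGateways {
			fmt.Printf("%s: %s\n", scope, gw.Name)
		}
	}
	// A complete listing would keep calling with PageToken(resp.NextPageToken)
	// until it comes back empty.
}
```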
func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -127163,7 +136170,7 @@ func (c *VpnGatewaysAggregatedListCall) Header() http.Header { func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127225,7 +136232,7 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa } return ret, nil // { - // "description": "Retrieves an aggregated list of VPN gateways. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of VPN gateways.", // "httpMethod": "GET", // "id": "compute.vpnGateways.aggregatedList", // "parameterOrder": [ @@ -127233,25 +136240,30 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -127309,8 +136321,7 @@ type VpnGatewaysDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified VPN gateway. (== suppress_warning -// http-rest-shadowed ==) +// Delete: Deletes the specified VPN gateway. 
func (r *VpnGatewaysService) Delete(project string, region string, vpnGateway string) *VpnGatewaysDeleteCall { c := &VpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127365,7 +136376,7 @@ func (c *VpnGatewaysDeleteCall) Header() http.Header { func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127426,7 +136437,7 @@ func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified VPN gateway.", // "httpMethod": "DELETE", // "id": "compute.vpnGateways.delete", // "parameterOrder": [ @@ -127488,8 +136499,7 @@ type VpnGatewaysGetCall struct { } // Get: Returns the specified VPN gateway. Gets a list of available VPN -// gateways by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// gateways by making a list() request. func (r *VpnGatewaysService) Get(project string, region string, vpnGateway string) *VpnGatewaysGetCall { c := &VpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127535,7 +136545,7 @@ func (c *VpnGatewaysGetCall) Header() http.Header { func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127599,7 +136609,7 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro } return ret, nil // { - // "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", // "httpMethod": "GET", // "id": "compute.vpnGateways.get", // "parameterOrder": [ @@ -127656,8 +136666,7 @@ type VpnGatewaysGetStatusCall struct { header_ http.Header } -// GetStatus: Returns the status for the specified VPN gateway. (== -// suppress_warning http-rest-shadowed ==) +// GetStatus: Returns the status for the specified VPN gateway. func (r *VpnGatewaysService) GetStatus(project string, region string, vpnGateway string) *VpnGatewaysGetStatusCall { c := &VpnGatewaysGetStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127703,7 +136712,7 @@ func (c *VpnGatewaysGetStatusCall) Header() http.Header { func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127767,7 +136776,7 @@ func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGateway } return ret, nil // { - // "description": "Returns the status for the specified VPN gateway. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the status for the specified VPN gateway.", // "httpMethod": "GET", // "id": "compute.vpnGateways.getStatus", // "parameterOrder": [ @@ -127824,8 +136833,7 @@ type VpnGatewaysInsertCall struct { } // Insert: Creates a VPN gateway in the specified project and region -// using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// using the data included in the request. func (r *VpnGatewaysService) Insert(project string, region string, vpngateway *VpnGateway) *VpnGatewaysInsertCall { c := &VpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127880,7 +136888,7 @@ func (c *VpnGatewaysInsertCall) Header() http.Header { func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127945,7 +136953,7 @@ func (c *VpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a VPN gateway in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.vpnGateways.insert", // "parameterOrder": [ @@ -128001,7 +137009,7 @@ type VpnGatewaysListCall struct { } // List: Retrieves a list of VPN gateways available to the specified -// project and region. (== suppress_warning http-rest-shadowed ==) +// project and region. func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysListCall { c := &VpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128013,24 +137021,25 @@ func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysLis // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *VpnGatewaysListCall) Filter(filter string) *VpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -128038,10 +137047,10 @@ func (c *VpnGatewaysListCall) Filter(filter string) *VpnGatewaysListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *VpnGatewaysListCall) MaxResults(maxResults int64) *VpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -128052,12 +137061,13 @@ func (c *VpnGatewaysListCall) MaxResults(maxResults int64) *VpnGatewaysListCall // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *VpnGatewaysListCall) OrderBy(orderBy string) *VpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) @@ -128065,7 +137075,7 @@ func (c *VpnGatewaysListCall) OrderBy(orderBy string) *VpnGatewaysListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
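The VpnGatewaysListCall documentation above spells out the maxResults, orderBy, and pageToken/nextPageToken contract. A sketch of manual pagination under those rules (project, region, and page size are placeholders; the generated Pages helper elsewhere in this file performs the same loop for you):

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listAllVpnGateways follows nextPageToken until the region's VPN
// gateway list is exhausted, newest resources first.
func listAllVpnGateways(ctx context.Context, svc *compute.Service, project, region string) ([]*compute.VpnGateway, error) {
	var all []*compute.VpnGateway
	pageToken := ""
	for {
		call := svc.VpnGateways.List(project, region).
			OrderBy(`creationTimestamp desc`).
			MaxResults(50).
			Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		pageToken = page.NextPageToken
	}
}
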
func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) @@ -128109,7 +137119,7 @@ func (c *VpnGatewaysListCall) Header() http.Header { func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128172,7 +137182,7 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, } return ret, nil // { - // "description": "Retrieves a list of VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of VPN gateways available to the specified project and region.", // "httpMethod": "GET", // "id": "compute.vpnGateways.list", // "parameterOrder": [ @@ -128181,25 +137191,25 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -128266,8 +137276,7 @@ type VpnGatewaysSetLabelsCall struct { } // SetLabels: Sets the labels on a VpnGateway. To learn more about -// labels, read the Labeling Resources documentation. (== -// suppress_warning http-rest-shadowed ==) +// labels, read the Labeling Resources documentation. 
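The SetLabels doc comment above only loses its suppress_warning marker; the call itself is unchanged and its full signature appears in the context line just below. A hedged sketch of the read-modify-write pattern the call expects (the label key and value are made up, and the current labelFingerprint must come from a prior Get):

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// labelVpnGateway adds one label to an existing VPN gateway. SetLabels
// replaces the whole label map, so start from the current labels and
// send the matching fingerprint to avoid clobbering concurrent edits.
func labelVpnGateway(ctx context.Context, svc *compute.Service, project, region, name string) error {
	gw, err := svc.VpnGateways.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	labels := gw.Labels
	if labels == nil {
		labels = map[string]string{}
	}
	labels["env"] = "staging"

	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: gw.LabelFingerprint,
		Labels:           labels,
	}
	op, err := svc.VpnGateways.SetLabels(project, region, name, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op // poll the returned Operation if completion matters
	return nil
}
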
func (r *VpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnGatewaysSetLabelsCall { c := &VpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128323,7 +137332,7 @@ func (c *VpnGatewaysSetLabelsCall) Header() http.Header { func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128389,7 +137398,7 @@ func (c *VpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + // "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", // "id": "compute.vpnGateways.setLabels", // "parameterOrder": [ @@ -128454,7 +137463,7 @@ type VpnGatewaysTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. (== suppress_warning http-rest-shadowed ==) +// specified resource. func (r *VpnGatewaysService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *VpnGatewaysTestIamPermissionsCall { c := &VpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128491,7 +137500,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128557,7 +137566,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", // "id": "compute.vpnGateways.testIamPermissions", // "parameterOrder": [ @@ -128615,8 +137624,7 @@ type VpnTunnelsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of VPN tunnels. (== -// suppress_warning http-rest-shadowed ==) +// AggregatedList: Retrieves an aggregated list of VPN tunnels. func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregatedListCall { c := &VpnTunnelsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128627,35 +137635,49 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *VpnTunnelsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *VpnTunnelsAggregatedListCall) MaxResults(maxResults int64) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -128666,12 +137688,13 @@ func (c *VpnTunnelsAggregatedListCall) MaxResults(maxResults int64) *VpnTunnelsA // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *VpnTunnelsAggregatedListCall) OrderBy(orderBy string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) @@ -128679,7 +137702,7 @@ func (c *VpnTunnelsAggregatedListCall) OrderBy(orderBy string) *VpnTunnelsAggreg } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) @@ -128723,7 +137746,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128785,7 +137808,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun } return ret, nil // { - // "description": "Retrieves an aggregated list of VPN tunnels. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves an aggregated list of VPN tunnels.", // "httpMethod": "GET", // "id": "compute.vpnTunnels.aggregatedList", // "parameterOrder": [ @@ -128793,25 +137816,30 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -128869,8 +137897,7 @@ type VpnTunnelsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified VpnTunnel resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified VpnTunnel resource. func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel string) *VpnTunnelsDeleteCall { c := &VpnTunnelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128925,7 +137952,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128986,7 +138013,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified VpnTunnel resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified VpnTunnel resource.", // "httpMethod": "DELETE", // "id": "compute.vpnTunnels.delete", // "parameterOrder": [ @@ -129048,8 +138075,7 @@ type VpnTunnelsGetCall struct { } // Get: Returns the specified VpnTunnel resource. Gets a list of -// available VPN tunnels by making a list() request. (== -// suppress_warning http-rest-shadowed ==) +// available VPN tunnels by making a list() request. func (r *VpnTunnelsService) Get(project string, region string, vpnTunnel string) *VpnTunnelsGetCall { c := &VpnTunnelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129095,7 +138121,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129159,7 +138185,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) } return ret, nil // { - // "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", // "httpMethod": "GET", // "id": "compute.vpnTunnels.get", // "parameterOrder": [ @@ -129216,8 +138242,7 @@ type VpnTunnelsInsertCall struct { } // Insert: Creates a VpnTunnel resource in the specified project and -// region using the data included in the request. (== suppress_warning -// http-rest-shadowed ==) +// region using the data included in the request. 
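The VpnTunnels Insert doc comment above is likewise just trimmed. For orientation, a hedged sketch of creating a classic VPN tunnel with this call (every field value is a placeholder; an HA VPN tunnel would reference a VpnGateway and Router instead of a TargetVpnGateway):

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createTunnel inserts a classic VPN tunnel and returns the resulting
// regional Operation; the caller is responsible for waiting on it.
func createTunnel(ctx context.Context, svc *compute.Service, project, region string) (*compute.Operation, error) {
	tunnel := &compute.VpnTunnel{
		Name:         "example-tunnel",
		PeerIp:       "203.0.113.10",
		SharedSecret: "change-me",
		IkeVersion:   2,
		TargetVpnGateway: "https://www.googleapis.com/compute/v1/projects/" + project +
			"/regions/" + region + "/targetVpnGateways/example-gateway",
	}
	return svc.VpnTunnels.Insert(project, region, tunnel).Context(ctx).Do()
}
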
func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *VpnTunnel) *VpnTunnelsInsertCall { c := &VpnTunnelsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129272,7 +138297,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129337,7 +138362,7 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", // "id": "compute.vpnTunnels.insert", // "parameterOrder": [ @@ -129393,8 +138418,7 @@ type VpnTunnelsListCall struct { } // List: Retrieves a list of VpnTunnel resources contained in the -// specified project and region. (== suppress_warning http-rest-shadowed -// ==) +// specified project and region. func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListCall { c := &VpnTunnelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129406,24 +138430,25 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { c.urlParams_.Set("filter", filter) return c @@ -129431,10 +138456,10 @@ func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *VpnTunnelsListCall) MaxResults(maxResults int64) *VpnTunnelsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -129445,12 +138470,13 @@ func (c *VpnTunnelsListCall) MaxResults(maxResults int64) *VpnTunnelsListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *VpnTunnelsListCall) OrderBy(orderBy string) *VpnTunnelsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -129458,7 +138484,7 @@ func (c *VpnTunnelsListCall) OrderBy(orderBy string) *VpnTunnelsListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -129502,7 +138528,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129565,7 +138591,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e } return ret, nil // { - // "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", // "httpMethod": "GET", // "id": "compute.vpnTunnels.list", // "parameterOrder": [ @@ -129574,25 +138600,25 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -129657,8 +138683,7 @@ type ZoneOperationsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified zone-specific Operations resource. (== -// suppress_warning http-rest-shadowed ==) +// Delete: Deletes the specified zone-specific Operations resource. // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/delete func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -129695,7 +138720,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129731,7 +138756,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Deletes the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Deletes the specified zone-specific Operations resource.", // "httpMethod": "DELETE", // "id": "compute.zoneOperations.delete", // "parameterOrder": [ @@ -129784,8 +138809,7 @@ type ZoneOperationsGetCall struct { header_ http.Header } -// Get: Retrieves the specified zone-specific Operations resource. (== -// suppress_warning http-rest-shadowed ==) +// Get: Retrieves the specified zone-specific Operations resource. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall { c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -129832,7 +138856,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129896,7 +138920,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Retrieves the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the specified zone-specific Operations resource.", // "httpMethod": "GET", // "id": "compute.zoneOperations.get", // "parameterOrder": [ @@ -129953,7 +138977,7 @@ type ZoneOperationsListCall struct { } // List: Retrieves a list of Operation resources contained within the -// specified zone. (== suppress_warning http-rest-shadowed ==) +// specified zone. // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall { c := &ZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -129966,24 +138990,25 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -129991,10 +139016,10 @@ func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -130005,12 +139030,13 @@ func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsLis // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. func (c *ZoneOperationsListCall) OrderBy(orderBy string) *ZoneOperationsListCall { c.urlParams_.Set("orderBy", orderBy) @@ -130018,7 +139044,7 @@ func (c *ZoneOperationsListCall) OrderBy(orderBy string) *ZoneOperationsListCall } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsListCall { c.urlParams_.Set("pageToken", pageToken) @@ -130062,7 +139088,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130125,7 +139151,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified zone. 
(== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves a list of Operation resources contained within the specified zone.", // "httpMethod": "GET", // "id": "compute.zoneOperations.list", // "parameterOrder": [ @@ -130134,25 +139160,25 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -130205,6 +139231,172 @@ func (c *ZoneOperationsListCall) Pages(ctx context.Context, f func(*OperationLis } } +// method id "compute.zoneOperations.wait": + +type ZoneOperationsWaitCall struct { + s *Service + project string + zone string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. +// +// This method is called on a best-effort basis. Specifically: +// - In uncommon cases, when the server is overloaded, the request might +// return before the default deadline is reached, or might return after +// zero seconds. +// - If the default deadline is reached, there is no guarantee that the +// operation is actually done when the method returns. Be prepared to +// retry if the operation is not `DONE`. +func (r *ZoneOperationsService) Wait(project string, zone string, operation string) *ZoneOperationsWaitCall { + c := &ZoneOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ZoneOperationsWaitCall) Fields(s ...googleapi.Field) *ZoneOperationsWaitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZoneOperationsWaitCall) Context(ctx context.Context) *ZoneOperationsWaitCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ZoneOperationsWaitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}/wait") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.zoneOperations.wait" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ZoneOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. 
Be prepared to retry if the operation is not `DONE`.", + // "httpMethod": "POST", + // "id": "compute.zoneOperations.wait", + // "parameterOrder": [ + // "project", + // "zone", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/operations/{operation}/wait", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.zones.get": type ZonesGetCall struct { @@ -130218,8 +139410,7 @@ type ZonesGetCall struct { } // Get: Returns the specified Zone resource. Gets a list of available -// zones by making a list() request. (== suppress_warning -// http-rest-shadowed ==) +// zones by making a list() request. // For details, see https://cloud.google.com/compute/docs/reference/latest/zones/get func (r *ZonesService) Get(project string, zone string) *ZonesGetCall { c := &ZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -130265,7 +139456,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130328,7 +139519,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { } return ret, nil // { - // "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", // "httpMethod": "GET", // "id": "compute.zones.get", // "parameterOrder": [ @@ -130376,7 +139567,7 @@ type ZonesListCall struct { } // List: Retrieves the list of Zone resources available to the specified -// project. (== suppress_warning http-rest-shadowed ==) +// project. // For details, see https://cloud.google.com/compute/docs/reference/latest/zones/list func (r *ZonesService) List(project string) *ZonesListCall { c := &ZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -130388,24 +139579,25 @@ func (r *ZonesService) List(project string) *ZonesListCall { // filters resources listed in the response. The expression must specify // the field name, a comparison operator, and the value that you want to // use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. 
// // For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // // You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` func (c *ZonesListCall) Filter(filter string) *ZonesListCall { c.urlParams_.Set("filter", filter) return c @@ -130413,10 +139605,10 @@ func (c *ZonesListCall) Filter(filter string) *ZonesListCall { // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c @@ -130427,12 +139619,13 @@ func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall { // order based on the resource name. // // You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. // -// Currently, only sorting by name or creationTimestamp desc is +// Currently, only sorting by `name` or `creationTimestamp desc` is // supported. 
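The new compute.zoneOperations.wait call and the reworked filter/orderBy documentation above can be exercised from the generated compute/v1 client along these lines. A minimal sketch: it assumes the vendored google.golang.org/api/compute/v1 package (which exposes NewService), and the project, zone, operation name, and filter value are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Block (up to the ~2 minute default deadline) on a single operation.
	// The call may return before the operation is DONE, so be prepared to retry.
	op, err := svc.ZoneOperations.Wait("my-project", "us-central1-a", "operation-123").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(op.Name, op.Status)

	// List operations newest-first, using an illustrative filter expression.
	err = svc.ZoneOperations.List("my-project", "us-central1-a").
		Filter(`status != "DONE"`).
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Pages(ctx, func(page *compute.OperationList) error {
			for _, item := range page.Items {
				fmt.Println(item.Name, item.Status)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
```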
func (c *ZonesListCall) OrderBy(orderBy string) *ZonesListCall { c.urlParams_.Set("orderBy", orderBy) @@ -130440,7 +139633,7 @@ func (c *ZonesListCall) OrderBy(orderBy string) *ZonesListCall { } // PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a +// token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { c.urlParams_.Set("pageToken", pageToken) @@ -130484,7 +139677,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200410") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130546,7 +139739,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { } return ret, nil // { - // "description": "Retrieves the list of Zone resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "description": "Retrieves the list of Zone resources available to the specified project.", // "httpMethod": "GET", // "id": "compute.zones.list", // "parameterOrder": [ @@ -130554,25 +139747,25 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 4431716d3..471ae259c 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -54,7 +54,7 @@ const ( // DefaultUploadChunkSize is the default chunk size to use for resumable // uploads if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 + DefaultUploadChunkSize = 16 * 1024 * 1024 // MinUploadChunkSize is the minimum chunk size that can be used for // resumable uploads. 
All user-specified chunk sizes must be multiple of diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 000000000..fedcce15b --- /dev/null +++ b/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index a6f9a2dea..75e9445e1 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -90,3 +90,16 @@ func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) } return google.JWTAccessTokenSourceFromJSON(data, audience) } + +// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. +// +// NOTE(cbro): consider promoting this to a field on google.Credentials. +func QuotaProjectFromCreds(cred *google.Credentials) string { + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(cred.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} diff --git a/vendor/google.golang.org/api/internal/gensupport/version.go b/vendor/google.golang.org/api/internal/gensupport/version.go new file mode 100644 index 000000000..23f6aa24e --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/version.go @@ -0,0 +1,53 @@ +// Copyright 2020 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "runtime" + "strings" + "unicode" +) + +// GoVersion returns the Go runtime version. The returned string +// has no whitespace. +func GoVersion() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go deleted file mode 100644 index 0680dd990..000000000 --- a/vendor/google.golang.org/api/internal/pool.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 Google LLC. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "errors" - - "google.golang.org/grpc/naming" -) - -// PoolResolver provides a fixed list of addresses to load balance between -// and does not provide further updates. -type PoolResolver struct { - poolSize int - dialOpt *DialSettings - ch chan []*naming.Update -} - -// NewPoolResolver returns a PoolResolver -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func NewPoolResolver(size int, o *DialSettings) *PoolResolver { - return &PoolResolver{poolSize: size, dialOpt: o} -} - -// Resolve returns a Watcher for the endpoint defined by the DialSettings -// provided to NewPoolResolver. -func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { - if r.dialOpt.Endpoint == "" { - return nil, errors.New("no endpoint configured") - } - addrs := make([]*naming.Update, 0, r.poolSize) - for i := 0; i < r.poolSize; i++ { - addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) - } - r.ch = make(chan []*naming.Update, 1) - r.ch <- addrs - return r, nil -} - -// Next returns a static list of updates on the first call, -// and blocks indefinitely until Close is called on subsequent calls. -func (r *PoolResolver) Next() ([]*naming.Update, error) { - return <-r.ch, nil -} - -// Close releases resources associated with the pool and causes Next to unblock. -func (r *PoolResolver) Close() { - close(r.ch) -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 544d715c8..33ba7ac12 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -6,6 +6,7 @@ package internal import ( + "crypto/tls" "errors" "net/http" @@ -18,6 +19,7 @@ import ( // Google API service. type DialSettings struct { Endpoint string + DefaultEndpoint string Scopes []string TokenSource oauth2.TokenSource Credentials *google.Credentials @@ -29,8 +31,11 @@ type DialSettings struct { HTTPClient *http.Client GRPCDialOpts []grpc.DialOption GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int NoAuth bool TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) // Google API system parameters. 
For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -70,6 +75,12 @@ func (ds *DialSettings) Validate() error { if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { return errors.New("multiple credential options provided") } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } if ds.HTTPClient != nil && ds.GRPCConn != nil { return errors.New("WithHTTPClient is incompatible with WithGRPCConn") } @@ -82,6 +93,12 @@ func (ds *DialSettings) Validate() error { if ds.HTTPClient != nil && ds.RequestReason != "" { return errors.New("WithHTTPClient is incompatible with RequestReason") } + if ds.HTTPClient != nil && ds.ClientCertSource != nil { + return errors.New("WithHTTPClient is incompatible with WithClientCertSource") + } + if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { + return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") + } return nil } diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go new file mode 100644 index 000000000..48121e42f --- /dev/null +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -0,0 +1,26 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internaloption contains options used internally by Google client code. +package internaloption + +import ( + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +type defaultEndpointOption string + +func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpoint = string(o) +} + +// WithDefaultEndpoint is an option that indicates the default endpoint. +// +// It should only be used internally by generated clients. +// +// This is similar to WithEndpoint, but allows us to determine whether the user has overriden the default endpoint. +func WithDefaultEndpoint(url string) option.ClientOption { + return defaultEndpointOption(url) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 0d9ae4e7a..b7c40d60a 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -6,6 +6,7 @@ package option import ( + "crypto/tls" "net/http" "golang.org/x/oauth2" @@ -142,6 +143,7 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) { // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC // connections that requests will be balanced between. +// // This is an EXPERIMENTAL API and may be changed or removed in the future. 
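The internaloption.WithDefaultEndpoint option added above is meant for generated clients; a hedged sketch of that wiring follows, where the fooapi package name, basePath value, and newClient shape are all illustrative rather than taken from any real generated package.

```go
package fooapi // illustrative generated-client package

import (
	"context"
	"net/http"

	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
	htransport "google.golang.org/api/transport/http"
)

// basePath is a placeholder default endpoint for this sketch.
const basePath = "https://fooapi.googleapis.com/v1/"

// newClient prepends WithDefaultEndpoint so the transport layer can tell
// whether the caller overrode the endpoint (via WithEndpoint) and merge
// host-only overrides into the default URL.
func newClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
	opts = append([]option.ClientOption{internaloption.WithDefaultEndpoint(basePath)}, opts...)
	return htransport.NewClient(ctx, opts...)
}
```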
func WithGRPCConnectionPool(size int) ClientOption { return withGRPCConnectionPool(size) @@ -150,8 +152,7 @@ func WithGRPCConnectionPool(size int) ClientOption { type withGRPCConnectionPool int func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) - o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) + o.GRPCConnPoolSize = int(w) } // WithAPIKey returns a ClientOption that specifies an API key to be used @@ -228,11 +229,43 @@ func (w withRequestReason) Apply(o *internal.DialSettings) { // settings on gRPC and HTTP clients. // An example reason would be to bind custom telemetry that overrides the defaults. func WithTelemetryDisabled() ClientOption { - return withTelemetryDisabledOption{} + return withTelemetryDisabled{} } -type withTelemetryDisabledOption struct{} +type withTelemetryDisabled struct{} -func (w withTelemetryDisabledOption) Apply(o *internal.DialSettings) { +func (w withTelemetryDisabled) Apply(o *internal.DialSettings) { o.TelemetryDisabled = true } + +// ClientCertSource is a function that returns a TLS client certificate to be used +// when opening TLS connections. +// +// It follows the same semantics as crypto/tls.Config.GetClientCertificate. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// WithClientCertSource returns a ClientOption that specifies a +// callback function for obtaining a TLS client certificate. +// +// This option is used for supporting mTLS authentication, where the +// server validates the client certifcate when establishing a connection. +// +// The callback function will be invoked whenever the server requests a +// certificate from the client. Implementations of the callback function +// should try to ensure that a valid certificate can be repeatedly returned +// on demand for the entire life cycle of the transport client. If a nil +// Certificate is returned (i.e. no Certificate can be obtained), an error +// should be returned. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithClientCertSource(s ClientCertSource) ClientOption { + return withClientCertSource{s} +} + +type withClientCertSource struct{ s ClientCertSource } + +func (w withClientCertSource) Apply(o *internal.DialSettings) { + o.ClientCertSource = w.s +} diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go new file mode 100644 index 000000000..c03af65fd --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -0,0 +1,110 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. 
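The WithClientCertSource option above pairs with the new transport/cert package that follows. A minimal sketch of supplying a caller-provided certificate source to the HTTP transport; the certificate and key paths are placeholders, and WithoutAuthentication is used only so the sketch does not depend on local credentials.

```go
package main

import (
	"context"
	"crypto/tls"
	"log"

	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()

	// Placeholder certificate and key paths.
	source := func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
		cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
		if err != nil {
			return nil, err
		}
		return &cert, nil
	}

	client, endpoint, err := htransport.NewClient(ctx,
		option.WithClientCertSource(source),
		option.WithoutAuthentication(), // keeps the sketch credential-free
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
	log.Println("resolved endpoint:", endpoint)
}
```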
+package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +var ( + defaultSourceOnce sync.Once + defaultSource Source + defaultSourceErr error +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// DefaultSource returns a certificate source that execs the command specified +// in the file at ~/.secureConnect/context_aware_metadata.json +// +// If that file does not exist, a nil source is returned. +func DefaultSource() (Source, error) { + defaultSourceOnce.Do(func() { + defaultSource, defaultSourceErr = newSecureConnectSource() + }) + return defaultSource, defaultSourceErr +} + +type secureConnectSource struct { + metadata secureConnectMetadata +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// newSecureConnectSource creates a secureConnectSource by reading the well-known file. +func newSecureConnectSource() (Source, error) { + user, err := user.Current() + if err != nil { + // Ignore. + return nil, nil + } + filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) + file, err := ioutil.ReadFile(filename) + if os.IsNotExist(err) { + // Ignore. + return nil, nil + } + if err != nil { + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + // TODO(cbro): consider caching valid certificates rather than exec'ing every time. + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + // TODO(cbro): read stderr for error message? Might contain sensitive info. 
+ return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 1ef67cefb..4848698db 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -9,14 +9,18 @@ package http import ( "context" + "crypto/tls" "errors" "net/http" + "net/url" + "strings" "go.opencensus.io/plugin/ochttp" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" "google.golang.org/api/option" + "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" ) @@ -28,15 +32,23 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? if settings.HTTPClient != nil { - return settings.HTTPClient, settings.Endpoint, nil + return settings.HTTPClient, endpoint, nil } - trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) if err != nil { return nil, "", err } - return &http.Client{Transport: trans}, settings.Endpoint, nil + return &http.Client{Transport: trans}, endpoint, nil } // NewTransport creates an http.RoundTripper for use communicating with a Google @@ -53,13 +65,13 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl } func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { - trans := base - trans = parameterTransport{ - base: trans, + paramTransport := ¶meterTransport{ + base: base, userAgent: settings.UserAgent, quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } + var trans http.RoundTripper = paramTransport trans = addOCTransport(trans, settings) switch { case settings.NoAuth: @@ -74,6 +86,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + if paramTransport.quotaProject == "" { + paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) + } trans = &oauth2.Transport{ Base: trans, Source: creds.TokenSource, @@ -104,7 +119,7 @@ type parameterTransport struct { base http.RoundTripper } -func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { +func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base if rt == nil { return nil, errors.New("transport: no Transport specified") @@ -134,11 +149,23 @@ func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) var appengineUrlfetchHook func(context.Context) http.RoundTripper // defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. -func defaultBaseTransport(ctx context.Context) http.RoundTripper { +// On App Engine, this is urlfetch.Transport. +// If TLSCertificate is available, return a custom Transport with TLSClientConfig. +// Otherwise, return http.DefaultTransport. 
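With the transport/cert package now in place, the default Endpoint Verification flow looks roughly like this. A hedged sketch: the JSON shape in the comment mirrors the cert_provider_command field defined above, and the provider path mentioned there is a placeholder.

```go
package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/api/transport/cert"
)

func main() {
	// DefaultSource looks for ~/.secureConnect/context_aware_metadata.json,
	// which (per the package above) names the provider command, e.g.
	//   {"cert_provider_command": ["/path/to/cert_provider", "--some-flag"]}
	// A nil source with a nil error means the file simply does not exist.
	src, err := cert.DefaultSource()
	if err != nil {
		log.Fatal(err)
	}
	if src == nil {
		log.Println("no endpoint-verification metadata found; continuing without a client certificate")
		return
	}

	// The source plugs straight into crypto/tls, mirroring what
	// defaultBaseTransport does when a client certificate source is present.
	cfg := &tls.Config{GetClientCertificate: src}
	_ = cfg
}
```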
+func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } + + if clientCertSource != nil { + // TODO (cbro): copy default transport settings from http.DefaultTransport + return &http.Transport{ + TLSClientConfig: &tls.Config{ + GetClientCertificate: clientCertSource, + }, + } + } + return http.DefaultTransport } @@ -151,3 +178,102 @@ func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) ht Propagation: &propagation.HTTPFormat{}, } } + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// The overall logic is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { + return settings.ClientCertSource, nil + // TODO(andyzhao): Currently, many services including compute, storage, and bigquery + // do not have working mTLS endpoints, so we will disable the ADC for DCA logic + // until we can confirm that all services have working mTLS endpoints. + /* + if settings.HTTPClient != nil { + return nil, nil // HTTPClient is incompatible with ClientCertificateSource + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSoure() + } + */ +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint" +// +// If no endpoint override is specified, we will return the default endpoint (or +// the default mTLS endpoint if a client certificate is available). +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + if clientCertSource != nil { + return generateDefaultMtlsEndpoint(settings.DefaultEndpoint), nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. 
+ return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + return "", errors.New("WithEndpoint requires a full URL path") + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func mergeEndpoints(base, newHost string) (string, error) { + u, err := url.Parse(base) + if err != nil { + return "", err + } + u.Host = newHost + return u.String(), nil +} + +// generateDefaultMtlsEndpoint attempts to derive the mTLS version of the +// defaultEndpoint via regex, and returns defaultEndpoint if unsuccessful. +// +// We need to applying the following 2 transformations: +// 1. pubsub.googleapis.com to pubsub.mtls.googleapis.com +// 2. pubsub.sandbox.googleapis.com to pubsub.mtls.sandbox.googleapis.com +// +// TODO(andyzhao): In the future, the mTLS endpoint will be read from the Discovery Document +// and passed in as defaultMtlsEndpoint instead of generated from defaultEndpoint, +// and this function will be removed. +func generateDefaultMtlsEndpoint(defaultEndpoint string) string { + var domains = []string{ + ".sandbox.googleapis.com", // must come first because .googleapis.com is a substring + ".googleapis.com", + } + for _, domain := range domains { + if strings.Contains(defaultEndpoint, domain) { + return strings.Replace(defaultEndpoint, domain, ".mtls"+domain, -1) + } + } + return defaultEndpoint +} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml index 70ffe89d5..6d03f4d36 100644 --- a/vendor/google.golang.org/appengine/.travis.yml +++ b/vendor/google.golang.org/appengine/.travis.yml @@ -10,8 +10,6 @@ script: matrix: include: - - go: 1.8.x - env: GOAPP=true - go: 1.9.x env: GOAPP=true - go: 1.10.x diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod deleted file mode 100644 index 635c34f5a..000000000 --- a/vendor/google.golang.org/appengine/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module google.golang.org/appengine - -go 1.11 - -require ( - github.com/golang/protobuf v1.3.1 - golang.org/x/net v0.0.0-20190603091049-60506f45cf65 - golang.org/x/text v0.3.2 -) diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum deleted file mode 100644 index ce22f6856..000000000 --- a/vendor/google.golang.org/appengine/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/google.golang.org/appengine/internal/api.go 
b/vendor/google.golang.org/appengine/internal/api.go index a6ec19e14..721053c20 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -58,8 +58,11 @@ var ( apiHTTPClient = &http.Client{ Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + MaxIdleConns: 1000, + MaxIdleConnsPerHost: 10000, + IdleConnTimeout: 90 * time.Second, }, } diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 063d724cb..e79a53884 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.22.0 -// protoc v3.11.2 +// protoc-gen-go v1.25.0 +// protoc v3.13.0 // source: google/rpc/status.proto package status @@ -25,9 +25,9 @@ import ( sync "sync" proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" ) const ( @@ -61,7 +61,7 @@ type Status struct { Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. - Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` } func (x *Status) Reset() { @@ -110,7 +110,7 @@ func (x *Status) GetMessage() string { return "" } -func (x *Status) GetDetails() []*any.Any { +func (x *Status) GetDetails() []*anypb.Any { if x != nil { return x.Details } @@ -154,8 +154,8 @@ func file_google_rpc_status_proto_rawDescGZIP() []byte { var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_rpc_status_proto_goTypes = []interface{}{ - (*Status)(nil), // 0: google.rpc.Status - (*any.Any)(nil), // 1: google.protobuf.Any + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any } var file_google_rpc_status_proto_depIdxs = []int32{ 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db5..800e7bd4c 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -93,6 +93,22 @@ To build Go code, there are several options: #### Compiling error, undefined: grpc.SupportPackageIsVersion +##### If you are using Go modules: + +Please ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +``` +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +##### If you are *not* using Go modules: + Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` @@ -114,6 +130,10 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. 
bytes disrupted, possibly by a proxy in between 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d952f09f3..80559b80a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -21,6 +21,7 @@ package base import ( "context" "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -76,6 +77,9 @@ type baseBalancer struct { picker balancer.Picker v2Picker balancer.V2Picker config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure } func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { @@ -83,13 +87,23 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) } func (b *baseBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - if b.picker != nil { - b.picker = NewErrPicker(err) - } else { - b.v2Picker = NewErrPickerV2(err) - } + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.v2Picker, + }) } } @@ -99,6 +113,8 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { if grpclog.V(2) { grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { @@ -124,27 +140,41 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // The entry will be deleted in HandleSubConnStateChange. } } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } return nil } +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. 
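Relating to the README note above about MaxConnectionAgeGrace, a minimal server-side sketch of those keepalive parameters; the address and durations are illustrative, not recommendations.

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// Recycle connections periodically so clients re-resolve DNS...
		MaxConnectionAge: 5 * time.Minute,
		// ...but give long-running RPCs time to finish before the hard close.
		MaxConnectionAgeGrace: 1 * time.Minute,
	}))
	log.Fatal(srv.Serve(lis))
}
```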
+func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - errPicker if the balancer is in TransientFailure, // - built by the pickerBuilder with all READY SubConns otherwise. -func (b *baseBalancer) regeneratePicker(err error) { +func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { if b.pickerBuilder != nil { b.picker = NewErrPicker(balancer.ErrTransientFailure) } else { - if err != nil { - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(err)) - } else { - // This means the last subchannel transition was not to - // TransientFailure (otherwise err must be set), but the - // aggregate state of the balancer is TransientFailure, meaning - // there are no other addresses. - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(errors.New("resolver returned no addresses"))) - } + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) } return } @@ -187,6 +217,12 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } b.scStates[sc] = s switch s { case connectivity.Idle: @@ -195,19 +231,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. 
+ b.connErr = state.ConnectionError } - oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - b.regeneratePicker(state.ConnectionError) + b.state == connectivity.TransientFailure { + b.regeneratePicker() } if b.picker != nil { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 824f28e74..f8667a23f 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -24,8 +24,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) @@ -245,7 +245,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f58740b25..0740693b7 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -35,10 +35,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -151,7 +151,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -161,10 +161,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + channelz.Info(cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -197,12 +194,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.mkp = cc.dopts.copts.KeepaliveParams if 
cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = newProxyDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - }, - ) + cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + } + if cc.dopts.withProxy { + cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) + } } if cc.dopts.copts.UserAgent != "" { @@ -241,14 +239,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + cc.parsedTarget = grpcutil.ParseTarget(cc.target) + channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) if resolverBuilder == nil { // If resolver builder is still nil, the parsed target's scheme is // not registered. Fallback to default resolver and set Endpoint to // the original target. - grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) cc.parsedTarget = resolver.Target{ Scheme: resolver.GetDefaultScheme(), Endpoint: target, @@ -416,12 +414,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. 
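The dialer change above builds a plain net.Dialer-backed dial function and only wraps it with the proxy dialer when proxying is enabled. User code can supply a function of the same shape through grpc.WithContextDialer; a minimal sketch, with an illustrative target and timeout:

    package main

    import (
        "context"
        "net"
        "time"

        "google.golang.org/grpc"
    )

    func main() {
        // A custom dialer has the same signature as the default dialer above.
        dialer := func(ctx context.Context, addr string) (net.Conn, error) {
            return (&net.Dialer{Timeout: 5 * time.Second}).DialContext(ctx, "tcp", addr)
        }
        conn, err := grpc.Dial("example.com:443", grpc.WithInsecure(), grpc.WithContextDialer(dialer))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }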
close(csm.notifyChan) @@ -671,9 +664,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - grpclog.Infof("ClientConn switching balancer to %q", name) + channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -681,22 +674,12 @@ func (cc *ClientConn) switchBalancer(name string) { } builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() + } else { + channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() @@ -720,6 +703,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // Caller needs to make sure len(addrs) > 0. func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ + state: connectivity.Idle, cc: cc, addrs: addrs, scopts: opts, @@ -736,7 +720,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -834,7 +818,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -854,7 +838,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -1025,7 +1009,7 @@ func (cc *ClientConn) Close() error { Severity: channelz.CtINFO, } } - channelz.AddTraceEvent(cc.channelzID, ted) + channelz.AddTraceEvent(cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. 
channelz.RemoveEntry(cc.channelzID) @@ -1068,15 +1052,8 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } + channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1213,12 +1190,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { @@ -1300,7 +1272,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } @@ -1308,7 +1280,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1355,7 +1327,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
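The startHealthCheck path above only runs when a client health check function has been registered, which in user code normally means blank-importing the health package and enabling the check through service config. A sketch under those assumptions; the service name and target are illustrative:

    package main

    import (
        "google.golang.org/grpc"
        _ "google.golang.org/grpc/health" // registers the client-side health check function
    )

    func main() {
        conn, err := grpc.Dial(
            "dns:///example.com:443",
            grpc.WithInsecure(),
            // Enable per-subchannel health checking for this service.
            grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": "my.package.MyService"}}`),
        )
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }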
- grpclog.Error("Health check is requested but health check function is not set.") + channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1385,15 +1357,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1458,7 +1424,7 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -1560,9 +1526,9 @@ var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { - if cc.parsedTarget.Scheme == rb.Scheme() { + if scheme == rb.Scheme() { return rb } } - return resolver.Get(cc.parsedTarget.Scheme) + return resolver.Get(scheme) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 845ce5d21..e438fda22 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -100,7 +100,11 @@ type ProtocolInfo struct { ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string - // SecurityVersion is the security protocol version. + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. ServerName string diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 28b4f6232..86e956bc8 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -135,16 +135,26 @@ func NewTLS(c *tls.Config) TransportCredentials { return tc } -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. // serverNameOverride is for testing only. 
If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 63f5ae21d..35bde1033 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -72,6 +72,7 @@ type dialOptions struct { // we need to be able to configure this in tests. resolveNowBackoff func(int) time.Duration resolvers []resolver.Builder + withProxy bool } // DialOption configures how we set up the connection. @@ -307,6 +308,16 @@ func WithInsecure() DialOption { }) } +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// This API is EXPERIMENTAL. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.withProxy = false + }) +} + // WithTransportCredentials returns a DialOption which configures a connection // level security credentials (e.g., TLS/SSL). This should not be used together // with WithCredentialsBundle. 
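The new WithNoProxy option opts a single ClientConn out of the environment's proxy settings. A minimal usage sketch; the target is illustrative:

    package main

    import "google.golang.org/grpc"

    func main() {
        // Skip the proxy for this connection only; per the option's doc comment,
        // WithNoProxy is ignored when WithDialer or WithContextDialer is used.
        conn, err := grpc.Dial("internal.service:443", grpc.WithInsecure(), grpc.WithNoProxy())
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }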
@@ -557,6 +568,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, }, resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, + withProxy: true, } } diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod deleted file mode 100644 index 237836130..000000000 --- a/vendor/google.golang.org/grpc/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module google.golang.org/grpc - -go 1.11 - -require ( - github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 - github.com/envoyproxy/protoc-gen-validate v0.1.0 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.2.0 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 -) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum deleted file mode 100644 index dd5d0cee7..000000000 --- a/vendor/google.golang.org/grpc/go.sum +++ /dev/null @@ -1,53 +0,0 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 874ea6d98..c8bb2be34 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -26,64 +26,70 @@ // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. 
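The grpclog changes below route everything through an internal logger, but the public knobs are unchanged: the default logger reads GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL, and SetLoggerV2 still swaps the logger wholesale. A sketch of installing a verbose logger; the writers and verbosity level are illustrative:

    package main

    import (
        "io/ioutil"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Route INFO to stdout, WARNING/ERROR to stderr, at verbosity level 2.
        grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))

        // Alternatively, a logger that discards everything silences gRPC logging.
        _ = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
    }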
package grpclog // import "google.golang.org/grpc/grpclog" -import "os" +import ( + "os" -var logger = newLoggerV2() + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return logger.V(l) + return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...interface{}) { - logger.Warning(args...) + grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) + grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...interface{}) { - logger.Warningln(args...) + grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...interface{}) { - logger.Error(args...) + grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) + grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...interface{}) { - logger.Errorln(args...) + grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...interface{}) { - logger.Fatal(args...) + grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -91,7 +97,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) + grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. func Fatalln(args ...interface{}) { - logger.Fatalln(args...) + grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) { // // Deprecated: use Info. func Print(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) 
} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 097494f71..ef06a4822 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,6 +18,8 @@ package grpclog +import "google.golang.org/grpc/internal/grpclog" + // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. @@ -35,7 +37,7 @@ type Logger interface { // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} + grpclog.Logger = &loggerWrapper{Logger: l} } // loggerWrapper wraps Logger into a LoggerV2. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index d49325776..23612b7c4 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -24,6 +24,8 @@ import ( "log" "os" "strconv" + + "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. @@ -65,7 +67,8 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { - logger = l + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } const ( @@ -193,3 +196,19 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) { func (g *loggerT) V(l int) bool { return l <= g.v } + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f993..e4252e5be 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -30,7 +30,7 @@ import ( "sync/atomic" "time" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpclog" ) const ( @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") + grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. 
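SetLoggerV2 above now also stores the logger as a DepthLoggerV2 when it happens to implement the extra depth methods. A logger can opt in by embedding a stock LoggerV2 and adding those methods; the wrapper type below is hypothetical and simply ignores the depth:

    package main

    import (
        "log"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    // depthLogger embeds a stock LoggerV2 and adds the optional depth methods.
    // Here the depth argument is ignored; a real implementation would pass it to
    // its logging backend so the correct call site is reported.
    type depthLogger struct {
        grpclog.LoggerV2
    }

    func (d depthLogger) InfoDepth(depth int, args ...interface{})    { d.Info(args...) }
    func (d depthLogger) WarningDepth(depth int, args ...interface{}) { d.Warning(args...) }
    func (d depthLogger) ErrorDepth(depth int, args ...interface{})   { d.Error(args...) }
    func (d depthLogger) FatalDepth(depth int, args ...interface{})   { d.Fatal(args...) }

    func main() {
        grpclog.SetLoggerV2(depthLogger{grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr)})
        log.Println("gRPC logging now goes through depthLogger")
    }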
func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") + grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") + grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,7 +294,19 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { +func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) { + for d := desc; d != nil; d = d.Parent { + switch d.Severity { + case CtUNKNOWN: + grpclog.InfoDepth(depth+1, d.Desc) + case CtINFO: + grpclog.InfoDepth(depth+1, d.Desc) + case CtWarning: + grpclog.WarningDepth(depth+1, d.Desc) + case CtError: + grpclog.ErrorDepth(depth+1, d.Desc) + } + } if getMaxTraceEntry() == 0 { return } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..59c7bedec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// Info logs through grpclog.Info and adds a trace event if channelz is on. +func Info(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, args...) + } +} + +// Infof logs through grpclog.Infof and adds a trace event if channelz is on. +func Infof(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, msg) + } +} + +// Warning logs through grpclog.Warning and adds a trace event if channelz is on. +func Warning(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, args...) + } +} + +// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. +func Warningf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, msg) + } +} + +// Error logs through grpclog.Error and adds a trace event if channelz is on. 
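The helpers in the new logging.go only record trace events while channelz is on, which in practice usually means the channelz service has been registered on a server so the traces can be inspected by channelz clients. A sketch of that setup; the listen address is illustrative:

    package main

    import (
        "net"

        "google.golang.org/grpc"
        channelzsvc "google.golang.org/grpc/channelz/service"
    )

    func main() {
        lis, err := net.Listen("tcp", "localhost:50051")
        if err != nil {
            panic(err)
        }
        s := grpc.NewServer()
        // Exposes channelz data, including the trace events added above, over gRPC.
        channelzsvc.RegisterChannelzServiceToServer(s)
        if err := s.Serve(lis); err != nil {
            panic(err)
        }
    }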
+func Error(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + grpclog.ErrorDepth(1, args...) + } +} + +// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on. +func Errorf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + grpclog.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..8c8e19fce --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Info(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warning(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Error(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatal(args...) + } +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. 
Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 000000000..f6e0dc1da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + } + Logger.Infof(format, args...) 
+} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + } + Logger.Warningf(format, args...) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + } + Logger.Errorf(format, args...) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if Logger.V(2) { + pl.Infof(format, args...) + } +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(prefix string) *PrefixLogger { + return &PrefixLogger{prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 000000000..80b33cdaf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func ParseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0912f0bf4..c6fbe8bb1 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -37,11 +37,6 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // StatusRawProto is exported by status/status.go. This func returns a - // pointer to the wrapped Status proto for a given status.Status without a - // call to proto.Clone(). The returned Status proto should not be mutated by - // the caller. - StatusRawProto interface{} // func (*status.Status) *spb.Status // NewRequestInfoContext creates a new context based on the argument context attaching // the passed in RequestInfo to the new context. 
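grpcutil.ParseTarget above is internal, so the snippet below mirrors its splitting rules locally just to show the behavior for a few representative targets; the local parseTarget function exists only for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseTarget mirrors grpcutil.ParseTarget above: scheme://authority/endpoint,
    // falling back to treating the whole string as the endpoint.
    func parseTarget(target string) (scheme, authority, endpoint string) {
        rest := target
        if i := strings.Index(rest, "://"); i >= 0 {
            scheme, rest = rest[:i], rest[i+len("://"):]
            if j := strings.Index(rest, "/"); j >= 0 {
                return scheme, rest[:j], rest[j+1:]
            }
        }
        // No scheme, or no authority/endpoint separator: everything is the endpoint.
        return "", "", target
    }

    func main() {
        for _, t := range []string{"dns:///foo.example:443", "dns://8.8.8.8/foo.example:443", "foo.example:443"} {
            s, a, e := parseTarget(t)
            fmt.Printf("%-30s -> scheme=%q authority=%q endpoint=%q\n", t, s, a, e)
        }
    }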
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..23dae8e56 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,166 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. 
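The internal status package above mirrors the public google.golang.org/grpc/status API. From user code, attaching and reading details follows the same round trip; the error detail used here is only an example:

    package main

    import (
        "fmt"

        epb "google.golang.org/genproto/googleapis/rpc/errdetails"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func main() {
        st := status.New(codes.NotFound, "user not found")
        st, err := st.WithDetails(&epb.ResourceInfo{ResourceType: "user", ResourceName: "u-123"})
        if err != nil {
            panic(err)
        }

        // On the receiving side the details come back as decoded proto messages.
        recovered := status.Convert(st.Err())
        fmt.Println(recovered.Code(), recovered.Message())
        for _, d := range recovered.Details() {
            if ri, ok := d.(*epb.ResourceInfo); ok {
                fmt.Println("missing resource:", ri.ResourceType, ri.ResourceName)
            }
        }
    }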
+func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*Error)(s.Proto()) +} + +func (s *Status) Error() string { + p := s.Proto() + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Error is an alias of a status proto. It implements error and Status, +// and a nil Error should never be returned by this package. +type Error spb.Status + +func (se *Error) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +// GRPCStatus returns the Status represented by se. +func (se *Error) GRPCStatus() *Status { + return FromProto((*spb.Status)(se)) +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (se *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index c3c32dafe..fc44e9761 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -112,11 +112,10 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration headerMD metadata.MD @@ -186,8 +185,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } // And flush, in case no header or body has been sent yet. 
// This forces a separation of headers and trailers if this is the @@ -227,6 +229,8 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if err == nil { // transport has not been closed if ht.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) @@ -236,14 +240,16 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro return err } +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -262,9 +268,30 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } } +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() return ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } ht.rw.Write(hdr) ht.rw.Write(data) ht.rw.(http.Flusher).Flush() @@ -272,27 +299,27 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - v = encodeMetadataHeader(k, v) - h.Add(k, v) - } + if !headersWritten { + ht.writePendingHeaders(s) } + ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) if err == nil { if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
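The custom headers flushed by writeCustomHeaders above originate from the public metadata helpers in handler code. The sketch below shows those calls in isolation; outside a real RPC handler the context carries no server transport, so the calls return an error:

    package main

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    // annotate shows the calls a unary handler would make. Headers set this way
    // are buffered until the first write or the final status, matching the
    // writePendingHeaders behavior above.
    func annotate(ctx context.Context) error {
        if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-region", "us-west1")); err != nil {
            return err
        }
        // Trailer metadata is flushed together with the final status.
        return grpc.SetTrailer(ctx, metadata.Pairs("x-cost-units", "3"))
    }

    func main() {
        _ = annotate(context.Background()) // in real use, ctx comes from the RPC handler
    }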
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), + Header: md.Copy(), + Compression: s.sendCompress, }) } } @@ -338,7 +365,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 2d6feeb1b..1cc586f73 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -680,14 +680,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } if t.statsHandler != nil { - header, _, _ := metadata.FromOutgoingContextRaw(ctx) + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. outHeader := &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, - Header: header.Copy(), + Header: header, } t.statsHandler.HandleRPC(s.ctx, outHeader) } @@ -1188,9 +1195,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if t.statsHandler != nil { if isHeader { inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, } t.statsHandler.HandleRPC(s.ctx, inHeader) } else { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 8b04b0392..fa33ffb18 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,11 +35,9 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" @@ -57,9 +55,6 @@ var ( // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") - // statusRawProto is a function to get to the raw status proto wrapped in a - // status.Status without a proto.Clone(). - statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) ) // serverConnectionCounter counts the number of connections a server has seen @@ -813,10 +808,11 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { return ErrHeaderListSizeLimitViolation } if t.stats != nil { - // Note: WireLength is not set in outHeader. 
- // TODO(mmukhi): Revisit this later, if needed. + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. outHeader := &stats.OutHeader{ - Header: s.header.Copy(), + Header: s.header.Copy(), + Compression: s.sendCompress, } t.stats.HandleRPC(s.Context(), outHeader) } @@ -849,7 +845,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. @@ -880,6 +876,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) if t.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 3eaf724cd..edfda866c 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" @@ -46,34 +45,6 @@ type ccResolverWrapper struct { polling chan struct{} } -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} - // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { @@ -169,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } @@ -181,13 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } - grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver reported error: %v", err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } @@ -196,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } @@ -210,20 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { - grpclog.Infof("Service config lookups disabled; ignoring config") + channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } @@ -256,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index d3a4adc5e..cf9dbe7fd 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -287,13 +287,14 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo) {} -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. 
+func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. +// size in bytes the client can receive. // This is an EXPERIMENTAL API. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int @@ -305,13 +306,14 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. -func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. +// size in bytes the client can send. // This is an EXPERIMENTAL API. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0d75cb109..edfcdcaee 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -116,6 +116,8 @@ type serverOptions struct { dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle statsHandler stats.Handler maxConcurrentStreams uint32 @@ -311,6 +313,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { }) } +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { @@ -322,6 +334,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { }) } +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for stream RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. 
func InTapHandle(h tap.ServerInHandle) ServerOption { @@ -404,6 +426,8 @@ func NewServer(opt ...ServerOption) *Server { done: grpcsync.NewEvent(), czData: new(channelzData), } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) @@ -658,7 +682,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -705,7 +729,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -844,12 +868,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) + channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) + channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -864,6 +888,40 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return err } +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) 
+ } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { sh := s.opts.statsHandler if sh != nil || trInfo != nil || channelz.IsOn() { @@ -989,7 +1047,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } } return err @@ -1034,7 +1092,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1061,9 +1119,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // The entire stream is done (for unary RPC only). return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1113,6 +1171,40 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) 
+ } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() @@ -1297,7 +1389,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1338,7 +1430,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 9e22c393f..a7970c79a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -81,6 +81,10 @@ type InHeader struct { Client bool // WireLength is the wire length of header. WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -89,10 +93,6 @@ type InHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata received. - Header metadata.MD } // IsClient indicates if the stats information is from client side. @@ -141,6 +141,10 @@ func (s *OutPayload) isRPCStats() {} type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -149,10 +153,6 @@ type OutHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. 
LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata sent. - Header metadata.MD } // IsClient indicates if this stats information is from client side. @@ -165,6 +165,9 @@ type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index a1348e9b1..01e182c30 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,88 +29,23 @@ package status import ( "context" - "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/status" ) -func init() { - internal.StatusRawProto = statusRawProto -} - -func statusRawProto(s *Status) *spb.Status { return s.s } - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. -type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Is implements future error.Is functionality. -// A statusError is equivalent if the code and message are identical. -func (se *statusError) Is(target error) bool { - tse, ok := target.(*statusError) - if !ok { - return false - } - - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). 
@@ -135,7 +70,7 @@ func ErrorProto(s *spb.Status) error { // FromProto returns a Status representing s. func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} + return status.FromProto(s) } // FromError returns a Status representing err if it was produced from this @@ -160,42 +95,6 @@ func Convert(err error) *Status { return s } -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - // Code returns the Code of the error if it is a Status error, codes.OK if err // is nil, or codes.Unknown otherwise. func Code(err error) codes.Code { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb99940e3..934ef6832 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -498,13 +497,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index cf193820f..d96662c27 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.27.0" +const Version = "1.29.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 0e7370727..e12024fb8 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -1,20 +1,22 @@ #!/bin/bash -if [[ `uname -a` = *"Darwin"* ]]; then - echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" - exit 1 -fi - set -ex # Exit on error; debugging enabled. set -o pipefail # Fail a pipe if any sub-command fails. +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + die() { echo "$@" >&2 exit 1 } fail_on_output() { - tee /dev/stderr | (! read) + tee /dev/stderr | not read } # Check to make sure it's safe to modify the user's git repo. @@ -60,7 +62,7 @@ if [[ "$1" = "-install" ]]; then unzip ${PROTOC_FILENAME} bin/protoc --version popd - elif ! which protoc > /dev/null; then + elif not which protoc > /dev/null; then die "Please install protoc into your path" fi fi @@ -70,21 +72,21 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go') +not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -(! grep 'func Test[^(]' *_test.go) -(! grep 'func Test[^(]' test/*.go) +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go # - Do not import x/net/context. -(! git grep -l 'x/net/context' -- "*.go") +not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test') +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Ensure all ptypes proto packages are renamed when importing. -(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go") +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Check imports that are illegal in appengine (until Go 1.11). # TODO: Remove when we drop Go 1.10 support @@ -92,9 +94,9 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") -golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go vet -all . +goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go" +golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:" +go vet -all ./... misspell -error . @@ -119,9 +121,9 @@ fi SC_OUT="$(mktemp)" staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. -(! grep -v "is deprecated:.*SA1019" "${SC_OUT}") +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. -(! 
grep -Fv '.HandleResolvedAddrs +not grep -Fv '.HandleResolvedAddrs .HandleSubConnStateChange .HeaderMap .NewAddress @@ -151,9 +153,11 @@ grpc.WithMaxMsgSize grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier +info.SecurityVersion naming.Resolver naming.Update naming.Watcher resolver.Backend resolver.GRPCLB' "${SC_OUT}" -) + +echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index c2f8f28f2..179d6e8fc 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -6,14 +6,13 @@ package prototext import ( "fmt" - "strings" "unicode/utf8" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" @@ -23,6 +22,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -51,8 +51,9 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using options in -// UnmarshalOptions object. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -108,7 +109,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return errors.New("no support for proto1 MessageSets") } - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { return d.unmarshalAny(m, checkDelims) } @@ -158,21 +159,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch tok.NameKind() { case text.IdentName: name = pref.Name(tok.IdentName()) - fd = fieldDescs.ByName(name) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textproto field name is the group message name. - gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. - xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true @@ -269,15 +260,6 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return nil } -// findExtension returns protoreflect.ExtensionType from the Resolver if found. 
-func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { @@ -538,14 +520,13 @@ Loop: return d.unexpectedTokenError(tok) } - name := tok.IdentName() - switch name { - case "key": + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } if key.IsValid() { - return d.newError(tok.Pos(), `map entry "key" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } val, err := d.unmarshalScalar(fd.MapKey()) if err != nil { @@ -553,14 +534,14 @@ Loop: } key = val.MapKey() - case "value": + case genid.MapEntry_Value_field_name: if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } } if pval.IsValid() { - return d.newError(tok.Pos(), `map entry "value" cannot be repeated`) + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) } pval, err = unmarshalMapValue() if err != nil { @@ -597,13 +578,9 @@ Loop: func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { var typeURL string var bValue []byte - - // hasFields tracks which valid fields have been seen in the loop below in - // order to flag an error if there are duplicates or conflicts. It may - // contain the strings "type_url", "value" and "expanded". The literal - // "expanded" is used to indicate that the expanded form has been - // encountered already. 
- hasFields := map[string]bool{} + var seenTypeUrl bool + var seenValue bool + var isExpanded bool if checkDelims { tok, err := d.Read() @@ -642,12 +619,12 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch tok.IdentName() { - case "type_url": - if hasFields["type_url"] { - return d.newError(tok.Pos(), "duplicate Any type_url field") + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -657,15 +634,15 @@ Loop: var ok bool typeURL, ok = tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) } - hasFields["type_url"] = true + seenTypeUrl = true - case "value": - if hasFields["value"] { - return d.newError(tok.Pos(), "duplicate Any value field") + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) } - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) } tok, err := d.Read() @@ -674,22 +651,22 @@ Loop: } s, ok := tok.String() if !ok { - return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString()) + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) } bValue = []byte(s) - hasFields["value"] = true + seenValue = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } case text.TypeName: - if hasFields["expanded"] { + if isExpanded { return d.newError(tok.Pos(), "cannot have more than one type") } - if hasFields["type_url"] { + if seenTypeUrl { return d.newError(tok.Pos(), "conflict with type_url field") } typeURL = tok.TypeName() @@ -698,21 +675,21 @@ Loop: if err != nil { return err } - hasFields["expanded"] = true + isExpanded = true default: if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) } } } fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) } return nil } @@ -767,9 +744,6 @@ func (d decoder) skipValue() error { // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. 
- if err := d.skipValue(); err != nil { - return err - } } } } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 41e5c773c..8d5304dc5 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "sort" "strconv" "unicode/utf8" @@ -14,12 +13,13 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/encoding/text" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/fieldnum" "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -162,42 +162,22 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // Handle Any expansion. - if messageDesc.FullName() == "google.protobuf.Any" { + if messageDesc.FullName() == genid.Any_message_fullname { if e.marshalAny(m) { return nil } // If unable to expand, continue on to marshal Any as a regular message. } - // Marshal known fields. - fieldDescs := messageDesc.Fields() - size := fieldDescs.Len() - for i := 0; i < size; { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - - if fd == nil || !m.Has(fd) { - continue - } - - name := fd.Name() - // Use type name for group field name. - if fd.Kind() == pref.GroupKind { - name = fd.Message().Name() - } - val := m.Get(fd) - if err := e.marshalField(string(name), val, fd); err != nil { - return err + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false } - } - - // Marshal extensions. - if err := e.marshalExtensions(m); err != nil { + return true + }) + if err != nil { return err } @@ -290,18 +270,18 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto // marshalMap marshals the given protoreflect.Map as multiple name-value fields. func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { var err error - mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() - e.WriteName("key") + e.WriteName(string(genid.MapEntry_Key_field_name)) err = e.marshalSingular(key.Value(), fd.MapKey()) if err != nil { return false } - e.WriteName("value") + e.WriteName(string(genid.MapEntry_Value_field_name)) err = e.marshalSingular(val, fd.MapValue()) if err != nil { return false @@ -311,48 +291,6 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) return err } -// marshalExtensions marshals extension fields. 
-func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true - } - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) - return true - }) - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. - for _, entry := range entries { - // Extension field name is the proto field name enclosed in []. - name := "[" + entry.key + "]" - if err := e.marshalField(name, entry.value, entry.desc); err != nil { - return err - } - } - return nil -} - // marshalUnknown parses the given []byte and marshals fields out. // This function assumes proper encoding in the given []byte. func (e encoder) marshalUnknown(b []byte) { @@ -399,7 +337,7 @@ func (e encoder) marshalUnknown(b []byte) { func (e encoder) marshalAny(any pref.Message) bool { // Construct the embedded message. fds := any.Descriptor().Fields() - fdType := fds.ByNumber(fieldnum.Any_TypeUrl) + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) typeURL := any.Get(fdType).String() mt, err := e.opts.Resolver.FindMessageByURL(typeURL) if err != nil { @@ -408,7 +346,7 @@ func (e encoder) marshalAny(any pref.Message) bool { m := mt.New().Interface() // Unmarshal bytes into embedded message. - fdValue := fds.ByNumber(fieldnum.Any_Value) + fdValue := fds.ByNumber(genid.Any_Value_field_number) value := any.Get(fdValue) err = proto.UnmarshalOptions{ AllowPartial: true, diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index e7af0fe0d..360c63329 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -42,6 +42,8 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { name = "FileImports" case pref.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() } start, end = name+"{", "}" } diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go index a904dd1f9..49c8676d4 100644 --- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -26,6 +26,14 @@ func Bool() bool { return randSeed%2 == 1 } +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + // randSeed is a best-effort at an approximate hash of the Go binary. 
var randSeed = binaryHash() diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index b1eeea507..c1866f3c1 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -11,10 +11,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" ) -// The MessageSet wire format is equivalent to a message defiend as follows, +// The MessageSet wire format is equivalent to a message defined as follows, // where each Item defines an extension field with a field number of 'type_id' // and content of 'message'. MessageSet extensions must be non-repeated message // fields. @@ -48,33 +47,17 @@ func IsMessageSet(md pref.MessageDescriptor) bool { return ok && xmd.IsMessageSet() } -// IsMessageSetExtension reports this field extends a MessageSet. +// IsMessageSetExtension reports this field properly extends a MessageSet. func IsMessageSetExtension(fd pref.FieldDescriptor) bool { - if fd.Name() != ExtensionName { + switch { + case fd.Name() != ExtensionName: return false - } - if fd.FullName().Parent() != fd.Message().FullName() { + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): return false } - return IsMessageSet(fd.ContainingMessage()) -} - -// FindMessageSetExtension locates a MessageSet extension field by name. -// In text and JSON formats, the extension name used is the message itself. -// The extension field name is derived by appending ExtensionName. 
-func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { - name := s.Append(ExtensionName) - xt, err := r.FindExtensionByName(name) - if err != nil { - if err == preg.NotFound { - return nil, err - } - return nil, errors.Wrap(err, "%q", name) - } - if !IsMessageSetExtension(xt.TypeDescriptor()) { - return nil, preg.NotFound - } - return xt, nil + return true } // SizeField returns the size of a MessageSet item field containing an extension diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 16c02d7b6..38f1931c6 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -104,7 +104,7 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { - f.L1.JSONName.Init(jsonName) + f.L1.StringName.InitJSON(jsonName) } case s == "packed": f.L1.HasPacked = true diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index c4ba1c598..da289ccce 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -32,7 +32,6 @@ type Encoder struct { encoderState indent string - newline string // set to "\n" if len(indent) > 0 delims [2]byte outputASCII bool } @@ -61,7 +60,6 @@ func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, erro return nil, errors.New("indent may only be composed of space and tab characters") } e.indent = indent - e.newline = "\n" } switch delims { case [2]byte{0, 0}: @@ -126,7 +124,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // are used to represent both the proto string and bytes type. r = rune(in[0]) fallthrough - case r < ' ' || r == '"' || r == '\\': + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: out = append(out, '\\') switch r { case '"', '\\': @@ -143,7 +141,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { out = strconv.AppendUint(out, uint64(r), 16) } in = in[n:] - case outputASCII && r >= utf8.RuneSelf: + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): out = append(out, '\\') if r <= math.MaxUint16 { out = append(out, 'u') @@ -168,7 +166,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // escaping. If no characters need escaping, this returns the input length. func indexNeedEscapeInString(s string) int { for i := 0; i < len(s); i++ { - if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { return i } } @@ -265,3 +263,8 @@ func (e *Encoder) Snapshot() encoderState { func (e *Encoder) Reset(es encoderState) { e.encoderState = es } + +// AppendString appends the escaped form of the input string to b. 
+func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go deleted file mode 100644 index 74c5fef24..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Any. -const ( - Any_TypeUrl = 1 // optional string - Any_Value = 2 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go deleted file mode 100644 index 9a6b5f29b..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Api. -const ( - Api_Name = 1 // optional string - Api_Methods = 2 // repeated google.protobuf.Method - Api_Options = 3 // repeated google.protobuf.Option - Api_Version = 4 // optional string - Api_SourceContext = 5 // optional google.protobuf.SourceContext - Api_Mixins = 6 // repeated google.protobuf.Mixin - Api_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Method. -const ( - Method_Name = 1 // optional string - Method_RequestTypeUrl = 2 // optional string - Method_RequestStreaming = 3 // optional bool - Method_ResponseTypeUrl = 4 // optional string - Method_ResponseStreaming = 5 // optional bool - Method_Options = 6 // repeated google.protobuf.Option - Method_Syntax = 7 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Mixin. -const ( - Mixin_Name = 1 // optional string - Mixin_Root = 2 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go deleted file mode 100644 index 6e37b59e9..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto -) - -// Field numbers for google.protobuf.FileDescriptorProto. 
-const ( - FileDescriptorProto_Name = 1 // optional string - FileDescriptorProto_Package = 2 // optional string - FileDescriptorProto_Dependency = 3 // repeated string - FileDescriptorProto_PublicDependency = 10 // repeated int32 - FileDescriptorProto_WeakDependency = 11 // repeated int32 - FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto - FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto - FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto - FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto - FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions - FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo - FileDescriptorProto_Syntax = 12 // optional string -) - -// Field numbers for google.protobuf.DescriptorProto. -const ( - DescriptorProto_Name = 1 // optional string - DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto - DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto - DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto - DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange - DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto - DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions - DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange - DescriptorProto_ReservedName = 10 // repeated string -) - -// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. -const ( - DescriptorProto_ExtensionRange_Start = 1 // optional int32 - DescriptorProto_ExtensionRange_End = 2 // optional int32 - DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions -) - -// Field numbers for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_Start = 1 // optional int32 - DescriptorProto_ReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.ExtensionRangeOptions. -const ( - ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldDescriptorProto. -const ( - FieldDescriptorProto_Name = 1 // optional string - FieldDescriptorProto_Number = 3 // optional int32 - FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label - FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type - FieldDescriptorProto_TypeName = 6 // optional string - FieldDescriptorProto_Extendee = 2 // optional string - FieldDescriptorProto_DefaultValue = 7 // optional string - FieldDescriptorProto_OneofIndex = 9 // optional int32 - FieldDescriptorProto_JsonName = 10 // optional string - FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions - FieldDescriptorProto_Proto3Optional = 17 // optional bool -) - -// Field numbers for google.protobuf.OneofDescriptorProto. -const ( - OneofDescriptorProto_Name = 1 // optional string - OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions -) - -// Field numbers for google.protobuf.EnumDescriptorProto. 
-const ( - EnumDescriptorProto_Name = 1 // optional string - EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto - EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions - EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange - EnumDescriptorProto_ReservedName = 5 // repeated string -) - -// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. -const ( - EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32 - EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32 -) - -// Field numbers for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_Name = 1 // optional string - EnumValueDescriptorProto_Number = 2 // optional int32 - EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions -) - -// Field numbers for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_Name = 1 // optional string - ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto - ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions -) - -// Field numbers for google.protobuf.MethodDescriptorProto. -const ( - MethodDescriptorProto_Name = 1 // optional string - MethodDescriptorProto_InputType = 2 // optional string - MethodDescriptorProto_OutputType = 3 // optional string - MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions - MethodDescriptorProto_ClientStreaming = 5 // optional bool - MethodDescriptorProto_ServerStreaming = 6 // optional bool -) - -// Field numbers for google.protobuf.FileOptions. -const ( - FileOptions_JavaPackage = 1 // optional string - FileOptions_JavaOuterClassname = 8 // optional string - FileOptions_JavaMultipleFiles = 10 // optional bool - FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool - FileOptions_JavaStringCheckUtf8 = 27 // optional bool - FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode - FileOptions_GoPackage = 11 // optional string - FileOptions_CcGenericServices = 16 // optional bool - FileOptions_JavaGenericServices = 17 // optional bool - FileOptions_PyGenericServices = 18 // optional bool - FileOptions_PhpGenericServices = 42 // optional bool - FileOptions_Deprecated = 23 // optional bool - FileOptions_CcEnableArenas = 31 // optional bool - FileOptions_ObjcClassPrefix = 36 // optional string - FileOptions_CsharpNamespace = 37 // optional string - FileOptions_SwiftPrefix = 39 // optional string - FileOptions_PhpClassPrefix = 40 // optional string - FileOptions_PhpNamespace = 41 // optional string - FileOptions_PhpMetadataNamespace = 44 // optional string - FileOptions_RubyPackage = 45 // optional string - FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MessageOptions. -const ( - MessageOptions_MessageSetWireFormat = 1 // optional bool - MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool - MessageOptions_Deprecated = 3 // optional bool - MessageOptions_MapEntry = 7 // optional bool - MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.FieldOptions. 
-const ( - FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType - FieldOptions_Packed = 2 // optional bool - FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType - FieldOptions_Lazy = 5 // optional bool - FieldOptions_Deprecated = 3 // optional bool - FieldOptions_Weak = 10 // optional bool - FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.OneofOptions. -const ( - OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumOptions. -const ( - EnumOptions_AllowAlias = 2 // optional bool - EnumOptions_Deprecated = 3 // optional bool - EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.EnumValueOptions. -const ( - EnumValueOptions_Deprecated = 1 // optional bool - EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.ServiceOptions. -const ( - ServiceOptions_Deprecated = 33 // optional bool - ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.MethodOptions. -const ( - MethodOptions_Deprecated = 33 // optional bool - MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel - MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption -) - -// Field numbers for google.protobuf.UninterpretedOption. -const ( - UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart - UninterpretedOption_IdentifierValue = 3 // optional string - UninterpretedOption_PositiveIntValue = 4 // optional uint64 - UninterpretedOption_NegativeIntValue = 5 // optional int64 - UninterpretedOption_DoubleValue = 6 // optional double - UninterpretedOption_StringValue = 7 // optional bytes - UninterpretedOption_AggregateValue = 8 // optional string -) - -// Field numbers for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_NamePart = 1 // required string - UninterpretedOption_NamePart_IsExtension = 2 // required bool -) - -// Field numbers for google.protobuf.SourceCodeInfo. -const ( - SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location -) - -// Field numbers for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_Path = 1 // repeated int32 - SourceCodeInfo_Location_Span = 2 // repeated int32 - SourceCodeInfo_Location_LeadingComments = 3 // optional string - SourceCodeInfo_Location_TrailingComments = 4 // optional string - SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string -) - -// Field numbers for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation -) - -// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. 
-const ( - GeneratedCodeInfo_Annotation_Path = 1 // repeated int32 - GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string - GeneratedCodeInfo_Annotation_Begin = 3 // optional int32 - GeneratedCodeInfo_Annotation_End = 4 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go deleted file mode 100644 index e59788599..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldnum contains constants for field numbers of fields in messages -// declared in descriptor.proto and any of the well-known types. -package fieldnum diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go deleted file mode 100644 index 8816c7358..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Duration. -const ( - Duration_Seconds = 1 // optional int64 - Duration_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go deleted file mode 100644 index b5130a6dd..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Empty. -const () diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go deleted file mode 100644 index 7e3bfa27b..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.FieldMask. -const ( - FieldMask_Paths = 1 // repeated string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go deleted file mode 100644 index 241972b1f..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.SourceContext. 
-const ( - SourceContext_FileName = 1 // optional string -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go deleted file mode 100644 index c460aab44..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Struct. -const ( - Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry -) - -// Field numbers for google.protobuf.Struct.FieldsEntry. -const ( - Struct_FieldsEntry_Key = 1 // optional string - Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value -) - -// Field numbers for google.protobuf.Value. -const ( - Value_NullValue = 1 // optional google.protobuf.NullValue - Value_NumberValue = 2 // optional double - Value_StringValue = 3 // optional string - Value_BoolValue = 4 // optional bool - Value_StructValue = 5 // optional google.protobuf.Struct - Value_ListValue = 6 // optional google.protobuf.ListValue -) - -// Field numbers for google.protobuf.ListValue. -const ( - ListValue_Values = 1 // repeated google.protobuf.Value -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go deleted file mode 100644 index b4346fba5..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Timestamp. -const ( - Timestamp_Seconds = 1 // optional int64 - Timestamp_Nanos = 2 // optional int32 -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go deleted file mode 100644 index b392e9598..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.Type. -const ( - Type_Name = 1 // optional string - Type_Fields = 2 // repeated google.protobuf.Field - Type_Oneofs = 3 // repeated string - Type_Options = 4 // repeated google.protobuf.Option - Type_SourceContext = 5 // optional google.protobuf.SourceContext - Type_Syntax = 6 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.Field. -const ( - Field_Kind = 1 // optional google.protobuf.Field.Kind - Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality - Field_Number = 3 // optional int32 - Field_Name = 4 // optional string - Field_TypeUrl = 6 // optional string - Field_OneofIndex = 7 // optional int32 - Field_Packed = 8 // optional bool - Field_Options = 9 // repeated google.protobuf.Option - Field_JsonName = 10 // optional string - Field_DefaultValue = 11 // optional string -) - -// Field numbers for google.protobuf.Enum. 
-const ( - Enum_Name = 1 // optional string - Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue - Enum_Options = 3 // repeated google.protobuf.Option - Enum_SourceContext = 4 // optional google.protobuf.SourceContext - Enum_Syntax = 5 // optional google.protobuf.Syntax -) - -// Field numbers for google.protobuf.EnumValue. -const ( - EnumValue_Name = 1 // optional string - EnumValue_Number = 2 // optional int32 - EnumValue_Options = 3 // repeated google.protobuf.Option -) - -// Field numbers for google.protobuf.Option. -const ( - Option_Name = 1 // optional string - Option_Value = 2 // optional google.protobuf.Any -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go deleted file mode 100644 index 42f846a9f..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package fieldnum - -// Field numbers for google.protobuf.DoubleValue. -const ( - DoubleValue_Value = 1 // optional double -) - -// Field numbers for google.protobuf.FloatValue. -const ( - FloatValue_Value = 1 // optional float -) - -// Field numbers for google.protobuf.Int64Value. -const ( - Int64Value_Value = 1 // optional int64 -) - -// Field numbers for google.protobuf.UInt64Value. -const ( - UInt64Value_Value = 1 // optional uint64 -) - -// Field numbers for google.protobuf.Int32Value. -const ( - Int32Value_Value = 1 // optional int32 -) - -// Field numbers for google.protobuf.UInt32Value. -const ( - UInt32Value_Value = 1 // optional uint32 -) - -// Field numbers for google.protobuf.BoolValue. -const ( - BoolValue_Value = 1 // optional bool -) - -// Field numbers for google.protobuf.StringValue. -const ( - StringValue_Value = 1 // optional string -) - -// Field numbers for google.protobuf.BytesValue. -const ( - BytesValue_Value = 1 // optional bytes -) diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go deleted file mode 100644 index 517c4e2a0..000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldsort defines an ordering of fields. -// -// The ordering defined by this package matches the historic behavior of the proto -// package, placing extensions first and oneofs last. -// -// There is no guarantee about stability of the wire encoding, and users should not -// depend on the order defined in this package as it is subject to change without -// notice. -package fieldsort - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Less returns true if field a comes before field b in ordered wire marshal output.
-func Less(a, b protoreflect.FieldDescriptor) bool { - ea := a.IsExtension() - eb := b.IsExtension() - oa := a.ContainingOneof() - ob := b.ContainingOneof() - switch { - case ea != eb: - return ea - case oa != nil && ob != nil: - if oa == ob { - return a.Number() < b.Number() - } - return oa.Index() < ob.Index() - case oa != nil && !oa.IsSynthetic(): - return false - case ob != nil && !ob.IsSynthetic(): - return true - default: - return a.Number() < b.Number() - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index 462d384e9..b293b6947 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -3,11 +3,14 @@ // license that can be found in the LICENSE file. // Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. package filedesc import ( "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -126,24 +129,24 @@ func (db *Builder) unmarshalCounts(b []byte, isFile bool) { b = b[m:] if isFile { switch num { - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: db.NumExtensions++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: db.NumServices++ } } else { switch num { - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: db.NumEnums++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: db.unmarshalCounts(v, false) db.NumMessages++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: db.NumExtensions++ } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 2540befd6..98ab142ae 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,8 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -98,15 +100,6 @@ func (fd *File) lazyInitOnce() { fd.mu.Unlock() } -// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code -// to be able to retrieve the raw descriptor. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. 
-func (fd *File) ProtoLegacyRawDesc() []byte { - return fd.builder.RawDescriptor -} - // GoPackagePath is a pseudo-internal API for determining the Go package path // that this file descriptor is declared in. // @@ -206,7 +199,7 @@ type ( Number pref.FieldNumber Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers Kind pref.Kind - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions HasPacked bool // promoted from google.protobuf.FieldOptions @@ -276,8 +269,9 @@ func (fd *Field) Options() pref.ProtoMessage { func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } -func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } @@ -302,13 +296,13 @@ func (fd *Field) MapKey() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(1) + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } func (fd *Field) MapValue() pref.FieldDescriptor { if !fd.IsMap() { return nil } - return fd.Message().Fields().ByNumber(2) + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } func (fd *Field) HasDefault() bool { return fd.L1.Default.has } func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } @@ -372,7 +366,7 @@ type ( } ExtensionL2 struct { Options func() pref.ProtoMessage - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue @@ -390,8 +384,9 @@ func (xd *Extension) Options() pref.ProtoMessage { func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } -func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional @@ -505,27 +500,50 @@ func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syn func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} -type jsonName struct { - has bool - once sync.Once - name string +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + 
nameText string } -// Init initializes the name. It is exported for use by other internal packages. -func (js *jsonName) Init(s string) { - js.has = true - js.name = s +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name } -func (js *jsonName) get(fd pref.FieldDescriptor) string { - if !js.has { - js.once.Do(func() { - js.name = strs.JSONCamelCase(string(fd.Name())) - }) - } - return js.name +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. + var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s } +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index c0cddf86a..66e1fee52 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -8,7 +8,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -107,7 +107,7 @@ func (fd *File) unmarshalSeed(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Syntax: + case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": fd.L1.Syntax = pref.Proto2 @@ -116,36 +116,36 @@ func (fd *File) unmarshalSeed(b []byte) { default: panic("invalid syntax") } - case fieldnum.FileDescriptorProto_Name: + case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) - case fieldnum.FileDescriptorProto_Package: + case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = pref.FullName(sb.MakeString(v)) - case fieldnum.FileDescriptorProto_EnumType: - if prevField != fieldnum.FileDescriptorProto_EnumType { + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.FileDescriptorProto_MessageType: - if prevField != fieldnum.FileDescriptorProto_MessageType { + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.FileDescriptorProto_Extension: - if prevField != 
fieldnum.FileDescriptorProto_Extension { + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.FileDescriptorProto_Service: - if prevField != fieldnum.FileDescriptorProto_Service { + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { if numServices > 0 { panic("non-contiguous repeated field") } @@ -233,9 +233,9 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Name: + case genid.EnumDescriptorProto_Name_field_number: ed.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: numValues++ } default: @@ -260,7 +260,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) i++ } @@ -288,33 +288,33 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Name: + case genid.DescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.DescriptorProto_EnumType: - if prevField != fieldnum.DescriptorProto_EnumType { + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { if numEnums > 0 { panic("non-contiguous repeated field") } posEnums = len(b0) - len(b) - n - m } numEnums++ - case fieldnum.DescriptorProto_NestedType: - if prevField != fieldnum.DescriptorProto_NestedType { + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { if numMessages > 0 { panic("non-contiguous repeated field") } posMessages = len(b0) - len(b) - n - m } numMessages++ - case fieldnum.DescriptorProto_Extension: - if prevField != fieldnum.DescriptorProto_Extension { + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { if numExtensions > 0 { panic("non-contiguous repeated field") } posExtensions = len(b0) - len(b) - n - m } numExtensions++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: md.unmarshalSeedOptions(v) } prevField = num @@ -375,9 +375,9 @@ func (md *Message) unmarshalSeedOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -400,20 +400,20 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: xd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case 
genid.FieldDescriptorProto_Label_field_number: xd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: xd.L1.Kind = pref.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: xd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_Extendee: + case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) } default: @@ -436,7 +436,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.D v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Name: + case genid.ServiceDescriptorProto_Name_field_number: sd.L0.FullName = appendFullName(sb, pd.FullName(), v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index bc215944a..198451e3e 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" @@ -143,35 +143,35 @@ func (fd *File) unmarshalFull(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_PublicDependency: + case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case fieldnum.FileDescriptorProto_WeakDependency: + case genid.FileDescriptorProto_WeakDependency_field_number: fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FileDescriptorProto_Dependency: + case genid.FileDescriptorProto_Dependency_field_number: path := sb.MakeString(v) imp, _ := fd.builder.FileRegistry.FindFileByPath(path) if imp == nil { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) - case fieldnum.FileDescriptorProto_EnumType: + case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.FileDescriptorProto_MessageType: + case genid.FileDescriptorProto_MessageType_field_number: fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.FileDescriptorProto_Extension: + case genid.FileDescriptorProto_Extension_field_number: fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.FileDescriptorProto_Service: + case genid.FileDescriptorProto_Service_field_number: fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) serviceIdx++ - case fieldnum.FileDescriptorProto_Options: + case genid.FileDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -196,13 +196,13 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_Value: + case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) - case 
fieldnum.EnumDescriptorProto_ReservedName: + case genid.EnumDescriptorProto_ReservedName_field_number: ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.EnumDescriptorProto_ReservedRange: + case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) - case fieldnum.EnumDescriptorProto_Options: + case genid.EnumDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -228,9 +228,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumDescriptorProto_EnumReservedRange_Start: + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: r[0] = pref.EnumNumber(v) - case fieldnum.EnumDescriptorProto_EnumReservedRange_End: + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: r[1] = pref.EnumNumber(v) } default: @@ -255,17 +255,17 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Number: + case genid.EnumValueDescriptorProto_Number_field_number: vd.L1.Number = pref.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.EnumValueDescriptorProto_Name: + case genid.EnumValueDescriptorProto_Name_field_number: // NOTE: Enum values are in the same scope as the enum parent. vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) - case fieldnum.EnumValueDescriptorProto_Options: + case genid.EnumValueDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -289,29 +289,29 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_Field: + case genid.DescriptorProto_Field_field_number: rawFields = append(rawFields, v) - case fieldnum.DescriptorProto_OneofDecl: + case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) - case fieldnum.DescriptorProto_ReservedName: + case genid.DescriptorProto_ReservedName_field_number: md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case fieldnum.DescriptorProto_ReservedRange: + case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) - case fieldnum.DescriptorProto_ExtensionRange: + case genid.DescriptorProto_ExtensionRange_field_number: r, rawOptions := unmarshalMessageExtensionRange(v) opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) - case fieldnum.DescriptorProto_EnumType: + case genid.DescriptorProto_EnumType_field_number: md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ - case fieldnum.DescriptorProto_NestedType: + case genid.DescriptorProto_NestedType_field_number: md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) messageIdx++ - case fieldnum.DescriptorProto_Extension: + case genid.DescriptorProto_Extension_field_number: md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ - case fieldnum.DescriptorProto_Options: + case genid.DescriptorProto_Options_field_number: 
md.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -347,9 +347,9 @@ func (md *Message) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MessageOptions_MapEntry: + case genid.MessageOptions_MapEntry_field_number: md.L1.IsMapEntry = protowire.DecodeBool(v) - case fieldnum.MessageOptions_MessageSetWireFormat: + case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } default: @@ -368,9 +368,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ReservedRange_Start: + case genid.DescriptorProto_ReservedRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ReservedRange_End: + case genid.DescriptorProto_ReservedRange_End_field_number: r[1] = pref.FieldNumber(v) } default: @@ -390,16 +390,16 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Start: + case genid.DescriptorProto_ExtensionRange_Start_field_number: r[0] = pref.FieldNumber(v) - case fieldnum.DescriptorProto_ExtensionRange_End: + case genid.DescriptorProto_ExtensionRange_End_field_number: r[1] = pref.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.DescriptorProto_ExtensionRange_Options: + case genid.DescriptorProto_ExtensionRange_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -425,13 +425,13 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Number: + case genid.FieldDescriptorProto_Number_field_number: fd.L1.Number = pref.FieldNumber(v) - case fieldnum.FieldDescriptorProto_Label: + case genid.FieldDescriptorProto_Label_field_number: fd.L1.Cardinality = pref.Cardinality(v) - case fieldnum.FieldDescriptorProto_Type: + case genid.FieldDescriptorProto_Type_field_number: fd.L1.Kind = pref.Kind(v) - case fieldnum.FieldDescriptorProto_OneofIndex: + case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either // of them. This ensures pointers to slice elements are stable. 
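As an aside (illustrative only, not part of the patch): the stringName type added in desc.go above is what backs the TextName accessor and ByTextName lookup that this protobuf-go upgrade exposes on protoreflect field descriptors. A minimal sketch, assuming the vendored google.golang.org/protobuf module (v1.26+ API) and its durationpb well-known type; the local names are only for demonstration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Field descriptors for google.protobuf.Duration.
	fields := (&durationpb.Duration{}).ProtoReflect().Descriptor().Fields()

	// For an ordinary field like "seconds" the JSON and text names coincide;
	// the split matters for groups (text name is the group's message name)
	// and for extensions (both use the bracketed full name).
	fd := fields.ByJSONName("seconds")
	fmt.Println(fd.Number(), fd.JSONName(), fd.TextName())

	// ByTextName is the lookup added alongside TextName in this upgrade.
	fmt.Println(fields.ByTextName("seconds") == fd)
}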
@@ -441,22 +441,22 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des panic("oneof type already set") } fd.L1.ContainingOneof = od - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: fd.L1.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Name: + case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.FieldDescriptorProto_JsonName: - fd.L1.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: fd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -488,10 +488,10 @@ func (fd *Field) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: fd.L1.HasPacked = true fd.L1.IsPacked = protowire.DecodeBool(v) - case fieldnum.FieldOptions_Weak: + case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.HasEnforceUTF8 = true @@ -518,9 +518,9 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.OneofDescriptorProto_Name: + case genid.OneofDescriptorProto_Name_field_number: od.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.OneofDescriptorProto_Options: + case genid.OneofDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -543,20 +543,20 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_Proto3Optional: + case genid.FieldDescriptorProto_Proto3Optional_field_number: xd.L2.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.FieldDescriptorProto_JsonName: - xd.L2.JSONName.Init(sb.MakeString(v)) - case fieldnum.FieldDescriptorProto_DefaultValue: + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions - case fieldnum.FieldDescriptorProto_TypeName: + case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v - case fieldnum.FieldDescriptorProto_Options: + case genid.FieldDescriptorProto_Options_field_number: xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } @@ -586,7 +586,7 @@ func (xd *Extension) unmarshalOptions(b []byte) { v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.FieldOptions_Packed: + case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } default: @@ -608,9 +608,9 @@ 
func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.ServiceDescriptorProto_Method: + case genid.ServiceDescriptorProto_Method_field_number: rawMethods = append(rawMethods, v) - case fieldnum.ServiceDescriptorProto_Options: + case genid.ServiceDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: @@ -641,22 +641,22 @@ func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.De v, m := protowire.ConsumeVarint(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_ClientStreaming: + case genid.MethodDescriptorProto_ClientStreaming_field_number: md.L1.IsStreamingClient = protowire.DecodeBool(v) - case fieldnum.MethodDescriptorProto_ServerStreaming: + case genid.MethodDescriptorProto_ServerStreaming_field_number: md.L1.IsStreamingServer = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case fieldnum.MethodDescriptorProto_Name: + case genid.MethodDescriptorProto_Name_field_number: md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case fieldnum.MethodDescriptorProto_InputType: + case genid.MethodDescriptorProto_InputType_field_number: md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_OutputType: + case genid.MethodDescriptorProto_OutputType_field_number: md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) - case fieldnum.MethodDescriptorProto_Options: + case genid.MethodDescriptorProto_Options_field_number: rawOptions = appendOptions(rawOptions, v) } default: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index 1b7089b64..aa294fff9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -10,6 +10,8 @@ import ( "sort" "sync" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/errors" @@ -185,10 +187,7 @@ func (p *FieldRanges) CheckValid(isMessageSet bool) error { // Unlike the FieldNumber.IsValid method, it allows ranges that cover the // reserved number range. func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { - if isMessageSet { - return protowire.MinValidNumber <= n && n <= math.MaxInt32 - } - return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) } // CheckOverlap reports an error if p and q overlap. 
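Another aside (illustrative, not part of the patch): the rewritten isValidFieldNumber above folds the MessageSet special case into a single expression. Since protoreflect.FieldNumber is an alias of protowire.Number, the same predicate can be shown standalone; the helper below is a hedged sketch, not the vendored function itself:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

// isValid mirrors the relaxed check in the hunk above: MessageSet extension
// ranges may run past protowire.MaxValidNumber (1<<29 - 1), while ordinary
// messages keep that ceiling.
func isValid(n protowire.Number, isMessageSet bool) bool {
	return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet)
}

func main() {
	n := protowire.Number(math.MaxInt32)
	fmt.Println(isValid(n, false)) // false: above the normal limit of 536870911
	fmt.Println(isValid(n, true))  // true: permitted for MessageSet ranges
}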
@@ -249,6 +248,7 @@ type OneofFields struct { once sync.Once byName map[pref.Name]pref.FieldDescriptor // protected by once byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once } @@ -256,6 +256,7 @@ func (p *OneofFields) Len() int { return func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} @@ -265,11 +266,13 @@ func (p *OneofFields) lazyInit() *OneofFields { if len(p.List) > 0 { p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f p.byNum[f.Number()] = f } } @@ -278,9 +281,170 @@ func (p *OneofFields) lazyInit() *OneofFields { } type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } +} +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } + + // Update the next index for all locations. + p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. 
+type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 6a8825e80..30db19fdc 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -142,6 +142,7 @@ type Fields struct { once sync.Once byName map[protoreflect.Name]*Field // protected by once byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once byNum map[protoreflect.FieldNumber]*Field // protected by once } @@ -163,6 +164,12 @@ func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { } return nil } +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { if d := p.lazyInit().byNum[n]; d != nil { return d @@ -178,6 +185,7 @@ func (p *Fields) lazyInit() *Fields { if len(p.List) > 0 { p.byName = make(map[protoreflect.Name]*Field, len(p.List)) p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) for i := range p.List { d := &p.List[i] @@ -187,6 +195,9 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byJSON[d.JSONName()]; !ok { p.byJSON[d.JSONName()] = d } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 000000000..e6f7d47ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. 
+const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 000000000..df8f91850 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. +const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. 
+const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. +const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 000000000..e3cdf1c20 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. 
+const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. 
+const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. 
+const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. 
+const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName 
= "google.protobuf.FileOptions.swift_prefix" + FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions. 
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 000000000..45ccd0121 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 000000000..b070ef4fd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 000000000..762abb34a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 000000000..70bed453f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 000000000..693d2e9e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 000000000..8f9ea02ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 000000000..3e99ae16c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 000000000..1a38944b2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 000000000..f5cd5634c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 000000000..3bc710138 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 000000000..429384b85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 000000000..72527d2ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go deleted file mode 100644 index f45509fbd..000000000 --- a/vendor/google.golang.org/protobuf/internal/genname/name.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package genname contains constants for generated names. -package genname - -const ( - State = "state" - - SizeCache = "sizeCache" - SizeCacheA = "XXX_sizecache" - - WeakFields = "weakFields" - WeakFieldsA = "XXX_weak" - - UnknownFields = "unknownFields" - UnknownFieldsA = "XXX_unrecognized" - - ExtensionFields = "extensionFields" - ExtensionFieldsA = "XXX_InternalExtensions" - ExtensionFieldsB = "XXX_extensions" - - WeakFieldPrefix = "XXX_weak_" -) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index 4d22c9604..abee5f30e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -10,6 +10,7 @@ import ( "strconv" "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -19,6 +20,12 @@ import ( // functions that we do not want to appear in godoc. type Export struct{} +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. 
type enum = interface{} @@ -160,7 +167,7 @@ func (Export) MessageTypeOf(m message) pref.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), "") + return legacyLoadMessageType(reflect.TypeOf(m), "") } // MessageStringOf returns the message value as a string, diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index c00744d38..cb4b482d1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -10,6 +10,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -20,6 +21,7 @@ type errInvalidUTF8 struct{} func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } // initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. // @@ -242,7 +244,7 @@ func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if p.Elem().IsNil() { p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) @@ -276,7 +278,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: v, @@ -420,7 +422,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: b, @@ -494,7 +496,7 @@ func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderF } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } m := reflect.New(f.mi.GoReflectType.Elem()).Interface() mp := pointerOfIface(m) @@ -550,7 +552,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -613,7 +615,7 @@ func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wt } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -681,7 +683,7 @@ func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wt } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -767,7 +769,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } b, n := 
protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index ff198d0a1..1a509b63e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -15,13 +15,13 @@ import ( ) // sizeBool returns the size of wire encoding a bool pointer as a Bool. -func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBool wire encodes a bool pointer as a Bool. -func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -29,7 +29,7 @@ func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byt } // consumeBool wire decodes a bool pointer as a Bool. -func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -45,7 +45,7 @@ func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bool() = protowire.DecodeBool(v) out.n = n @@ -61,7 +61,7 @@ var coderBool = pointerCoderFuncs{ // sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. // The zero value is not encoded. -func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() if v == false { return 0 @@ -71,7 +71,7 @@ func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { // appendBoolNoZero wire encodes a bool pointer as a Bool. // The zero value is not encoded. -func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() if v == false { return b, nil @@ -90,14 +90,14 @@ var coderBoolNoZero = pointerCoderFuncs{ // sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. // It panics if the pointer is nil. -func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.BoolPtr() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBoolPtr wire encodes a *bool pointer as a Bool. // It panics if the pointer is nil. 
-func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.BoolPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -105,7 +105,7 @@ func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeBoolPtr wire decodes a *bool pointer as a Bool. -func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -121,7 +121,7 @@ func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.BoolPtr() if *vp == nil { @@ -140,7 +140,7 @@ var coderBoolPtr = pointerCoderFuncs{ } // sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. -func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) @@ -149,7 +149,7 @@ func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBoolSlice encodes a []bool pointer as a repeated Bool. -func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -159,13 +159,13 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. -func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -180,7 +180,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeBool(v)) b = b[n:] @@ -204,7 +204,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeBool(v)) out.n = n @@ -219,7 +219,7 @@ var coderBoolSlice = pointerCoderFuncs{ } // sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. 
-func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() if len(s) == 0 { return 0 @@ -232,7 +232,7 @@ func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size i } // appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. -func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() if len(s) == 0 { return b, nil @@ -257,19 +257,19 @@ var coderBoolPackedSlice = pointerCoderFuncs{ } // sizeBoolValue returns the size of wire encoding a bool value as a Bool. -func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) } // appendBoolValue encodes a bool value as a Bool. -func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) return b, nil } // consumeBoolValue decodes a bool value as a Bool. -func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -285,7 +285,7 @@ func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil @@ -299,7 +299,7 @@ var coderBoolValue = valueCoderFuncs{ } // sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. -func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -309,7 +309,7 @@ func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendBoolSliceValue encodes a []bool value as a repeated Bool. -func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -320,12 +320,12 @@ func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. 
-func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -340,7 +340,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) b = b[n:] @@ -363,7 +363,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) out.n = n @@ -378,7 +378,7 @@ var coderBoolSliceValue = valueCoderFuncs{ } // sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. -func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -393,7 +393,7 @@ func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. -func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -421,19 +421,19 @@ var coderBoolPackedSliceValue = valueCoderFuncs{ } // sizeEnumValue returns the size of wire encoding a value as a Enum. -func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Enum())) } // appendEnumValue encodes a value as a Enum. -func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Enum())) return b, nil } // consumeEnumValue decodes a value as a Enum. 
-func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -449,7 +449,7 @@ func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil @@ -463,7 +463,7 @@ var coderEnumValue = valueCoderFuncs{ } // sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. -func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -473,7 +473,7 @@ func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendEnumSliceValue encodes a [] value as a repeated Enum. -func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -484,12 +484,12 @@ func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeEnumSliceValue wire decodes a [] value as a repeated Enum. -func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -504,7 +504,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) b = b[n:] @@ -527,7 +527,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) out.n = n @@ -542,7 +542,7 @@ var coderEnumSliceValue = valueCoderFuncs{ } // sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. 
-func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -557,7 +557,7 @@ func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. -func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -585,13 +585,13 @@ var coderEnumPackedSliceValue = valueCoderFuncs{ } // sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. -func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32 wire encodes a int32 pointer as a Int32. -func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -599,7 +599,7 @@ func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt32 wire decodes a int32 pointer as a Int32. -func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -615,7 +615,7 @@ func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -631,7 +631,7 @@ var coderInt32 = pointerCoderFuncs{ // sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. // The zero value is not encoded. -func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -641,7 +641,7 @@ func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt32NoZero wire encodes a int32 pointer as a Int32. // The zero value is not encoded. -func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -660,14 +660,14 @@ var coderInt32NoZero = pointerCoderFuncs{ // sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. // It panics if the pointer is nil. -func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32Ptr wire encodes a *int32 pointer as a Int32. // It panics if the pointer is nil. 
-func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -675,7 +675,7 @@ func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt32Ptr wire decodes a *int32 pointer as a Int32. -func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -691,7 +691,7 @@ func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -710,7 +710,7 @@ var coderInt32Ptr = pointerCoderFuncs{ } // sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. -func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -719,7 +719,7 @@ func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt32Slice encodes a []int32 pointer as a repeated Int32. -func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -729,13 +729,13 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. -func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -750,7 +750,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -774,7 +774,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -789,7 +789,7 @@ var coderInt32Slice = pointerCoderFuncs{ } // sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. 
-func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -802,7 +802,7 @@ func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. -func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -827,19 +827,19 @@ var coderInt32PackedSlice = pointerCoderFuncs{ } // sizeInt32Value returns the size of wire encoding a int32 value as a Int32. -func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) } // appendInt32Value encodes a int32 value as a Int32. -func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(int32(v.Int()))) return b, nil } // consumeInt32Value decodes a int32 value as a Int32. -func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -855,7 +855,7 @@ func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -869,7 +869,7 @@ var coderInt32Value = valueCoderFuncs{ } // sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. -func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -879,7 +879,7 @@ func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt32SliceValue encodes a []int32 value as a repeated Int32. -func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -890,12 +890,12 @@ func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. 
-func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -910,7 +910,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -933,7 +933,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -948,7 +948,7 @@ var coderInt32SliceValue = valueCoderFuncs{ } // sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. -func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -963,7 +963,7 @@ func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. -func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -991,13 +991,13 @@ var coderInt32PackedSliceValue = valueCoderFuncs{ } // sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. -func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32 wire encodes a int32 pointer as a Sint32. -func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1005,7 +1005,7 @@ func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint32 wire decodes a int32 pointer as a Sint32. 
-func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1021,7 +1021,7 @@ func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) out.n = n @@ -1037,7 +1037,7 @@ var coderSint32 = pointerCoderFuncs{ // sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. // The zero value is not encoded. -func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -1047,7 +1047,7 @@ func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint32NoZero wire encodes a int32 pointer as a Sint32. // The zero value is not encoded. -func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -1066,14 +1066,14 @@ var coderSint32NoZero = pointerCoderFuncs{ // sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32Ptr wire encodes a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1081,7 +1081,7 @@ func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. -func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1097,7 +1097,7 @@ func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -1116,7 +1116,7 @@ var coderSint32Ptr = pointerCoderFuncs{ } // sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. 
-func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) @@ -1125,7 +1125,7 @@ func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint32Slice encodes a []int32 pointer as a repeated Sint32. -func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1135,13 +1135,13 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. -func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1156,7 +1156,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) b = b[n:] @@ -1180,7 +1180,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) out.n = n @@ -1195,7 +1195,7 @@ var coderSint32Slice = pointerCoderFuncs{ } // sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. -func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -1208,7 +1208,7 @@ func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. -func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -1233,19 +1233,19 @@ var coderSint32PackedSlice = pointerCoderFuncs{ } // sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. -func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) } // appendSint32Value encodes a int32 value as a Sint32. 
-func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) return b, nil } // consumeSint32Value decodes a int32 value as a Sint32. -func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1261,7 +1261,7 @@ func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil @@ -1275,7 +1275,7 @@ var coderSint32Value = valueCoderFuncs{ } // sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. -func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1285,7 +1285,7 @@ func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint32SliceValue encodes a []int32 value as a repeated Sint32. -func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1296,12 +1296,12 @@ func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. 
-func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1316,7 +1316,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) b = b[n:] @@ -1339,7 +1339,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) out.n = n @@ -1354,7 +1354,7 @@ var coderSint32SliceValue = valueCoderFuncs{ } // sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. -func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1369,7 +1369,7 @@ func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. -func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1397,13 +1397,13 @@ var coderSint32PackedSliceValue = valueCoderFuncs{ } // sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. -func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32 wire encodes a uint32 pointer as a Uint32. -func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1411,7 +1411,7 @@ func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint32 wire decodes a uint32 pointer as a Uint32. 
-func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1427,7 +1427,7 @@ func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = uint32(v) out.n = n @@ -1443,7 +1443,7 @@ var coderUint32 = pointerCoderFuncs{ // sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. // The zero value is not encoded. -func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -1453,7 +1453,7 @@ func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint32NoZero wire encodes a uint32 pointer as a Uint32. // The zero value is not encoded. -func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -1472,14 +1472,14 @@ var coderUint32NoZero = pointerCoderFuncs{ // sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1487,7 +1487,7 @@ func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. -func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1503,7 +1503,7 @@ func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -1522,7 +1522,7 @@ var coderUint32Ptr = pointerCoderFuncs{ } // sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. 
-func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1531,7 +1531,7 @@ func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. -func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1541,13 +1541,13 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. -func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1562,7 +1562,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, uint32(v)) b = b[n:] @@ -1586,7 +1586,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, uint32(v)) out.n = n @@ -1601,7 +1601,7 @@ var coderUint32Slice = pointerCoderFuncs{ } // sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. -func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -1614,7 +1614,7 @@ func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. -func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -1639,19 +1639,19 @@ var coderUint32PackedSlice = pointerCoderFuncs{ } // sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. -func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) } // appendUint32Value encodes a uint32 value as a Uint32. 
-func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) return b, nil } // consumeUint32Value decodes a uint32 value as a Uint32. -func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1667,7 +1667,7 @@ func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -1681,7 +1681,7 @@ var coderUint32Value = valueCoderFuncs{ } // sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. -func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1691,7 +1691,7 @@ func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. -func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1702,12 +1702,12 @@ func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1722,7 +1722,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -1745,7 +1745,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -1760,7 +1760,7 @@ var coderUint32SliceValue = valueCoderFuncs{ } // sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. -func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1775,7 +1775,7 @@ func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. -func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1803,13 +1803,13 @@ var coderUint32PackedSliceValue = valueCoderFuncs{ } // sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. -func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64 wire encodes a int64 pointer as a Int64. -func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1817,7 +1817,7 @@ func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt64 wire decodes a int64 pointer as a Int64. 
-func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1833,7 +1833,7 @@ func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -1849,7 +1849,7 @@ var coderInt64 = pointerCoderFuncs{ // sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. // The zero value is not encoded. -func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -1859,7 +1859,7 @@ func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt64NoZero wire encodes a int64 pointer as a Int64. // The zero value is not encoded. -func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -1878,14 +1878,14 @@ var coderInt64NoZero = pointerCoderFuncs{ // sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. // It panics if the pointer is nil. -func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64Ptr wire encodes a *int64 pointer as a Int64. // It panics if the pointer is nil. -func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1893,7 +1893,7 @@ func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt64Ptr wire decodes a *int64 pointer as a Int64. -func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1909,7 +1909,7 @@ func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -1928,7 +1928,7 @@ var coderInt64Ptr = pointerCoderFuncs{ } // sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. -func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1937,7 +1937,7 @@ func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
-func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1947,13 +1947,13 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. -func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1968,7 +1968,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -1992,7 +1992,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -2007,7 +2007,7 @@ var coderInt64Slice = pointerCoderFuncs{ } // sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. -func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2020,7 +2020,7 @@ func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. -func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2045,19 +2045,19 @@ var coderInt64PackedSlice = pointerCoderFuncs{ } // sizeInt64Value returns the size of wire encoding a int64 value as a Int64. -func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Int())) } // appendInt64Value encodes a int64 value as a Int64. -func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Int())) return b, nil } // consumeInt64Value decodes a int64 value as a Int64. 
-func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2073,7 +2073,7 @@ func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -2087,7 +2087,7 @@ var coderInt64Value = valueCoderFuncs{ } // sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. -func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2097,7 +2097,7 @@ func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt64SliceValue encodes a []int64 value as a repeated Int64. -func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2108,12 +2108,12 @@ func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. -func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2128,7 +2128,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -2151,7 +2151,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -2166,7 +2166,7 @@ var coderInt64SliceValue = valueCoderFuncs{ } // sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
-func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2181,7 +2181,7 @@ func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. -func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2209,13 +2209,13 @@ var coderInt64PackedSliceValue = valueCoderFuncs{ } // sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. -func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64 wire encodes a int64 pointer as a Sint64. -func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2223,7 +2223,7 @@ func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint64 wire decodes a int64 pointer as a Sint64. -func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2239,7 +2239,7 @@ func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = protowire.DecodeZigZag(v) out.n = n @@ -2255,7 +2255,7 @@ var coderSint64 = pointerCoderFuncs{ // sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. // The zero value is not encoded. -func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -2265,7 +2265,7 @@ func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint64NoZero wire encodes a int64 pointer as a Sint64. // The zero value is not encoded. -func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -2284,14 +2284,14 @@ var coderSint64NoZero = pointerCoderFuncs{ // sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. // It panics if the pointer is nil. 
-func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64Ptr wire encodes a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2299,7 +2299,7 @@ func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. -func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2315,7 +2315,7 @@ func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -2334,7 +2334,7 @@ var coderSint64Ptr = pointerCoderFuncs{ } // sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. -func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) @@ -2343,7 +2343,7 @@ func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint64Slice encodes a []int64 pointer as a repeated Sint64. -func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2353,13 +2353,13 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. 
-func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2374,7 +2374,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeZigZag(v)) b = b[n:] @@ -2398,7 +2398,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeZigZag(v)) out.n = n @@ -2413,7 +2413,7 @@ var coderSint64Slice = pointerCoderFuncs{ } // sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. -func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2426,7 +2426,7 @@ func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. -func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2451,19 +2451,19 @@ var coderSint64PackedSlice = pointerCoderFuncs{ } // sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. -func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) } // appendSint64Value encodes a int64 value as a Sint64. -func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) return b, nil } // consumeSint64Value decodes a int64 value as a Sint64. 
-func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2479,7 +2479,7 @@ func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil @@ -2493,7 +2493,7 @@ var coderSint64Value = valueCoderFuncs{ } // sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. -func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2503,7 +2503,7 @@ func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint64SliceValue encodes a []int64 value as a repeated Sint64. -func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2514,12 +2514,12 @@ func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. -func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2534,7 +2534,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) b = b[n:] @@ -2557,7 +2557,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) out.n = n @@ -2572,7 +2572,7 @@ var coderSint64SliceValue = valueCoderFuncs{ } // sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. 
-func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2587,7 +2587,7 @@ func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. -func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2615,13 +2615,13 @@ var coderSint64PackedSliceValue = valueCoderFuncs{ } // sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. -func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() return f.tagsize + protowire.SizeVarint(v) } // appendUint64 wire encodes a uint64 pointer as a Uint64. -func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2629,7 +2629,7 @@ func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint64 wire decodes a uint64 pointer as a Uint64. -func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2645,7 +2645,7 @@ func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -2661,7 +2661,7 @@ var coderUint64 = pointerCoderFuncs{ // sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. // The zero value is not encoded. -func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -2671,7 +2671,7 @@ func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint64NoZero wire encodes a uint64 pointer as a Uint64. // The zero value is not encoded. -func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -2690,14 +2690,14 @@ var coderUint64NoZero = pointerCoderFuncs{ // sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint64Ptr() return f.tagsize + protowire.SizeVarint(v) } // appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. 
// It panics if the pointer is nil. -func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2705,7 +2705,7 @@ func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. -func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2721,7 +2721,7 @@ func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -2740,7 +2740,7 @@ var coderUint64Ptr = pointerCoderFuncs{ } // sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. -func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(v) @@ -2749,7 +2749,7 @@ func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. -func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2759,13 +2759,13 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. -func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2780,7 +2780,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -2804,7 +2804,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -2819,7 +2819,7 @@ var coderUint64Slice = pointerCoderFuncs{ } // sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. 
-func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -2832,7 +2832,7 @@ func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. -func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -2857,19 +2857,19 @@ var coderUint64PackedSlice = pointerCoderFuncs{ } // sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. -func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(v.Uint()) } // appendUint64Value encodes a uint64 value as a Uint64. -func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, v.Uint()) return b, nil } // consumeUint64Value decodes a uint64 value as a Uint64. -func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2885,7 +2885,7 @@ func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -2899,7 +2899,7 @@ var coderUint64Value = valueCoderFuncs{ } // sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. -func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2909,7 +2909,7 @@ func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. -func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2920,12 +2920,12 @@ func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. 
-func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2940,7 +2940,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -2963,7 +2963,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -2978,7 +2978,7 @@ var coderUint64SliceValue = valueCoderFuncs{ } // sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. -func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2993,7 +2993,7 @@ func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. -func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3021,13 +3021,13 @@ var coderUint64PackedSliceValue = valueCoderFuncs{ } // sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. -func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32 wire encodes a int32 pointer as a Sfixed32. -func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3035,13 +3035,13 @@ func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. 
-func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -3057,7 +3057,7 @@ var coderSfixed32 = pointerCoderFuncs{ // sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. // The zero value is not encoded. -func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -3067,7 +3067,7 @@ func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. // The zero value is not encoded. -func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -3086,13 +3086,13 @@ var coderSfixed32NoZero = pointerCoderFuncs{ // sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3100,13 +3100,13 @@ func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. -func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -3125,14 +3125,14 @@ var coderSfixed32Ptr = pointerCoderFuncs{ } // sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. -func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. 
-func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3142,18 +3142,18 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. -func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -3167,7 +3167,7 @@ func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -3182,7 +3182,7 @@ var coderSfixed32Slice = pointerCoderFuncs{ } // sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. -func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -3192,7 +3192,7 @@ func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. -func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -3214,25 +3214,25 @@ var coderSfixed32PackedSlice = pointerCoderFuncs{ } // sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. -func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendSfixed32Value encodes a int32 value as a Sfixed32. -func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Int())) return b, nil } // consumeSfixed32Value decodes a int32 value as a Sfixed32. 
-func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -3246,14 +3246,14 @@ var coderSfixed32Value = valueCoderFuncs{ } // sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. -func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. -func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3264,17 +3264,17 @@ func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. -func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -3287,7 +3287,7 @@ func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -3302,7 +3302,7 @@ var coderSfixed32SliceValue = valueCoderFuncs{ } // sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. -func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3313,7 +3313,7 @@ func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. 
-func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3337,13 +3337,13 @@ var coderSfixed32PackedSliceValue = valueCoderFuncs{ } // sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. -func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32 wire encodes a uint32 pointer as a Fixed32. -func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3351,13 +3351,13 @@ func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed32 wire decodes a uint32 pointer as a Fixed32. -func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = v out.n = n @@ -3373,7 +3373,7 @@ var coderFixed32 = pointerCoderFuncs{ // sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. // The zero value is not encoded. -func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -3383,7 +3383,7 @@ func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. // The zero value is not encoded. -func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -3402,13 +3402,13 @@ var coderFixed32NoZero = pointerCoderFuncs{ // sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3416,13 +3416,13 @@ func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. 
-func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -3441,14 +3441,14 @@ var coderFixed32Ptr = pointerCoderFuncs{ } // sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. -func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. -func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3458,18 +3458,18 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. -func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -3483,7 +3483,7 @@ func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -3498,7 +3498,7 @@ var coderFixed32Slice = pointerCoderFuncs{ } // sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. -func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -3508,7 +3508,7 @@ func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. -func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -3530,25 +3530,25 @@ var coderFixed32PackedSlice = pointerCoderFuncs{ } // sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. 
-func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFixed32Value encodes a uint32 value as a Fixed32. -func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Uint())) return b, nil } // consumeFixed32Value decodes a uint32 value as a Fixed32. -func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -3562,14 +3562,14 @@ var coderFixed32Value = valueCoderFuncs{ } // sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. -func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. -func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3580,17 +3580,17 @@ func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. 
-func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -3603,7 +3603,7 @@ func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -3618,7 +3618,7 @@ var coderFixed32SliceValue = valueCoderFuncs{ } // sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. -func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3629,7 +3629,7 @@ func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. -func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3653,13 +3653,13 @@ var coderFixed32PackedSliceValue = valueCoderFuncs{ } // sizeFloat returns the size of wire encoding a float32 pointer as a Float. -func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloat wire encodes a float32 pointer as a Float. -func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3667,13 +3667,13 @@ func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeFloat wire decodes a float32 pointer as a Float. 
-func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float32() = math.Float32frombits(v) out.n = n @@ -3689,7 +3689,7 @@ var coderFloat = pointerCoderFuncs{ // sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. // The zero value is not encoded. -func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -3699,7 +3699,7 @@ func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendFloatNoZero wire encodes a float32 pointer as a Float. // The zero value is not encoded. -func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -3718,13 +3718,13 @@ var coderFloatNoZero = pointerCoderFuncs{ // sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. // It panics if the pointer is nil. -func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloatPtr wire encodes a *float32 pointer as a Float. // It panics if the pointer is nil. -func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3732,13 +3732,13 @@ func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeFloatPtr wire decodes a *float32 pointer as a Float. -func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float32Ptr() if *vp == nil { @@ -3757,14 +3757,14 @@ var coderFloatPtr = pointerCoderFuncs{ } // sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. -func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFloatSlice encodes a []float32 pointer as a repeated Float. 
-func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3774,18 +3774,18 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. -func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float32frombits(v)) b = b[n:] @@ -3799,7 +3799,7 @@ func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float32frombits(v)) out.n = n @@ -3814,7 +3814,7 @@ var coderFloatSlice = pointerCoderFuncs{ } // sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. -func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() if len(s) == 0 { return 0 @@ -3824,7 +3824,7 @@ func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. -func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() if len(s) == 0 { return b, nil @@ -3846,25 +3846,25 @@ var coderFloatPackedSlice = pointerCoderFuncs{ } // sizeFloatValue returns the size of wire encoding a float32 value as a Float. -func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFloatValue encodes a float32 value as a Float. -func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) return b, nil } // consumeFloatValue decodes a float32 value as a Float. 
-func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil @@ -3878,14 +3878,14 @@ var coderFloatValue = valueCoderFuncs{ } // sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. -func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFloatSliceValue encodes a []float32 value as a repeated Float. -func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3896,17 +3896,17 @@ func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. -func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) b = b[n:] @@ -3919,7 +3919,7 @@ func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) out.n = n @@ -3934,7 +3934,7 @@ var coderFloatSliceValue = valueCoderFuncs{ } // sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. -func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3945,7 +3945,7 @@ func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. 
-func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3969,13 +3969,13 @@ var coderFloatPackedSliceValue = valueCoderFuncs{ } // sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. -func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64 wire encodes a int64 pointer as a Sfixed64. -func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -3983,13 +3983,13 @@ func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. -func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -4005,7 +4005,7 @@ var coderSfixed64 = pointerCoderFuncs{ // sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. // The zero value is not encoded. -func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -4015,7 +4015,7 @@ func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. // The zero value is not encoded. -func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -4034,13 +4034,13 @@ var coderSfixed64NoZero = pointerCoderFuncs{ // sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -4048,13 +4048,13 @@ func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. 
-func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -4073,14 +4073,14 @@ var coderSfixed64Ptr = pointerCoderFuncs{ } // sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. -func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. -func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4090,18 +4090,18 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. -func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -4115,7 +4115,7 @@ func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -4130,7 +4130,7 @@ var coderSfixed64Slice = pointerCoderFuncs{ } // sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. -func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -4140,7 +4140,7 @@ func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. -func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -4162,25 +4162,25 @@ var coderSfixed64PackedSlice = pointerCoderFuncs{ } // sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. 
-func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendSfixed64Value encodes a int64 value as a Sfixed64. -func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, uint64(v.Int())) return b, nil } // consumeSfixed64Value decodes a int64 value as a Sfixed64. -func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -4194,14 +4194,14 @@ var coderSfixed64Value = valueCoderFuncs{ } // sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. -func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. -func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4212,17 +4212,17 @@ func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. 
-func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -4235,7 +4235,7 @@ func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -4250,7 +4250,7 @@ var coderSfixed64SliceValue = valueCoderFuncs{ } // sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. -func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4261,7 +4261,7 @@ func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. -func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4285,13 +4285,13 @@ var coderSfixed64PackedSliceValue = valueCoderFuncs{ } // sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. -func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64 wire encodes a uint64 pointer as a Fixed64. -func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4299,13 +4299,13 @@ func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
-func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -4321,7 +4321,7 @@ var coderFixed64 = pointerCoderFuncs{ // sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. // The zero value is not encoded. -func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -4331,7 +4331,7 @@ func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. // The zero value is not encoded. -func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -4350,13 +4350,13 @@ var coderFixed64NoZero = pointerCoderFuncs{ // sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4364,13 +4364,13 @@ func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. -func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -4389,14 +4389,14 @@ var coderFixed64Ptr = pointerCoderFuncs{ } // sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. -func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. 
-func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4406,18 +4406,18 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. -func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -4431,7 +4431,7 @@ func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -4446,7 +4446,7 @@ var coderFixed64Slice = pointerCoderFuncs{ } // sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. -func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -4456,7 +4456,7 @@ func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. -func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -4478,25 +4478,25 @@ var coderFixed64PackedSlice = pointerCoderFuncs{ } // sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. -func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendFixed64Value encodes a uint64 value as a Fixed64. -func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, v.Uint()) return b, nil } // consumeFixed64Value decodes a uint64 value as a Fixed64. 
-func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -4510,14 +4510,14 @@ var coderFixed64Value = valueCoderFuncs{ } // sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. -func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. -func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4528,17 +4528,17 @@ func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. -func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -4551,7 +4551,7 @@ func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -4566,7 +4566,7 @@ var coderFixed64SliceValue = valueCoderFuncs{ } // sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. -func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4577,7 +4577,7 @@ func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. 
-func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4601,13 +4601,13 @@ var coderFixed64PackedSliceValue = valueCoderFuncs{ } // sizeDouble returns the size of wire encoding a float64 pointer as a Double. -func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDouble wire encodes a float64 pointer as a Double. -func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4615,13 +4615,13 @@ func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeDouble wire decodes a float64 pointer as a Double. -func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float64() = math.Float64frombits(v) out.n = n @@ -4637,7 +4637,7 @@ var coderDouble = pointerCoderFuncs{ // sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. // The zero value is not encoded. -func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -4647,7 +4647,7 @@ func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendDoubleNoZero wire encodes a float64 pointer as a Double. // The zero value is not encoded. -func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -4666,13 +4666,13 @@ var coderDoubleNoZero = pointerCoderFuncs{ // sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. // It panics if the pointer is nil. -func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDoublePtr wire encodes a *float64 pointer as a Double. // It panics if the pointer is nil. 
-func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4680,13 +4680,13 @@ func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeDoublePtr wire decodes a *float64 pointer as a Double. -func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float64Ptr() if *vp == nil { @@ -4705,14 +4705,14 @@ var coderDoublePtr = pointerCoderFuncs{ } // sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. -func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendDoubleSlice encodes a []float64 pointer as a repeated Double. -func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4722,18 +4722,18 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. -func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float64frombits(v)) b = b[n:] @@ -4747,7 +4747,7 @@ func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float64frombits(v)) out.n = n @@ -4762,7 +4762,7 @@ var coderDoubleSlice = pointerCoderFuncs{ } // sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. -func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() if len(s) == 0 { return 0 @@ -4772,7 +4772,7 @@ func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. 
-func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() if len(s) == 0 { return b, nil @@ -4794,25 +4794,25 @@ var coderDoublePackedSlice = pointerCoderFuncs{ } // sizeDoubleValue returns the size of wire encoding a float64 value as a Double. -func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendDoubleValue encodes a float64 value as a Double. -func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) return b, nil } // consumeDoubleValue decodes a float64 value as a Double. -func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil @@ -4826,14 +4826,14 @@ var coderDoubleValue = valueCoderFuncs{ } // sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. -func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendDoubleSliceValue encodes a []float64 value as a repeated Double. -func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4844,17 +4844,17 @@ func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. 
-func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) b = b[n:] @@ -4867,7 +4867,7 @@ func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Num } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) out.n = n @@ -4882,7 +4882,7 @@ var coderDoubleSliceValue = valueCoderFuncs{ } // sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. -func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4893,7 +4893,7 @@ func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. -func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4917,13 +4917,13 @@ var coderDoublePackedSliceValue = valueCoderFuncs{ } // sizeString returns the size of wire encoding a string pointer as a String. -func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() return f.tagsize + protowire.SizeBytes(len(v)) } // appendString wire encodes a string pointer as a String. -func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4931,15 +4931,15 @@ func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeString wire decodes a string pointer as a String. 
-func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4952,7 +4952,7 @@ var coderString = pointerCoderFuncs{ } // appendStringValidateUTF8 wire encodes a string pointer as a String. -func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4963,18 +4963,18 @@ func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalO } // consumeStringValidateUTF8 wire decodes a string pointer as a String. -func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4988,7 +4988,7 @@ var coderStringValidateUTF8 = pointerCoderFuncs{ // sizeStringNoZero returns the size of wire encoding a string pointer as a String. // The zero value is not encoded. -func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() if len(v) == 0 { return 0 @@ -4998,7 +4998,7 @@ func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendStringNoZero wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5017,7 +5017,7 @@ var coderStringNoZero = pointerCoderFuncs{ // appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5039,14 +5039,14 @@ var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ // sizeStringPtr returns the size of wire encoding a *string pointer as a String. // It panics if the pointer is nil. 
-func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.StringPtr() return f.tagsize + protowire.SizeBytes(len(v)) } // appendStringPtr wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5054,19 +5054,19 @@ func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeStringPtr wire decodes a *string pointer as a String. -func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5080,7 +5080,7 @@ var coderStringPtr = pointerCoderFuncs{ // appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5091,22 +5091,22 @@ func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marsh } // consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. -func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5119,7 +5119,7 @@ var coderStringPtrValidateUTF8 = pointerCoderFuncs{ } // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. -func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.StringSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5128,7 +5128,7 @@ func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendStringSlice encodes a []string pointer as a repeated String. 
-func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5138,16 +5138,16 @@ func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeStringSlice wire decodes a []string pointer as a repeated String. -func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.StringSlice() if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *sp = append(*sp, v) + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5160,7 +5160,7 @@ var coderStringSlice = pointerCoderFuncs{ } // appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. -func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5173,19 +5173,19 @@ func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. -func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.StringSlice() +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *sp = append(*sp, v) + sp := p.StringSlice() + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5198,25 +5198,25 @@ var coderStringSliceValidateUTF8 = pointerCoderFuncs{ } // sizeStringValue returns the size of wire encoding a string value as a String. -func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.String())) } // appendStringValue encodes a string value as a String. -func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) return b, nil } // consumeStringValue decodes a string value as a String. 
-func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfString(string(v)), out, nil @@ -5230,7 +5230,7 @@ var coderStringValue = valueCoderFuncs{ } // appendStringValueValidateUTF8 encodes a string value as a String. -func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) if !utf8.ValidString(v.String()) { @@ -5240,15 +5240,15 @@ func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint6 } // consumeStringValueValidateUTF8 decodes a string value as a String. -func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return protoreflect.Value{}, out, errInvalidUTF8{} } out.n = n @@ -5263,7 +5263,7 @@ var coderStringValueValidateUTF8 = valueCoderFuncs{ } // sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. -func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5273,7 +5273,7 @@ func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendStringSliceValue encodes a []string value as a repeated String. -func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5284,14 +5284,14 @@ func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeStringSliceValue wire decodes a []string value as a repeated String. 
-func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfString(string(v))) out.n = n @@ -5306,13 +5306,13 @@ var coderStringSliceValue = valueCoderFuncs{ } // sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. -func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() return f.tagsize + protowire.SizeBytes(len(v)) } // appendBytes wire encodes a []byte pointer as a Bytes. -func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5320,13 +5320,13 @@ func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeBytes wire decodes a []byte pointer as a Bytes. -func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(emptyBuf[:], v...) out.n = n @@ -5341,7 +5341,7 @@ var coderBytes = pointerCoderFuncs{ } // appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. -func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5352,13 +5352,13 @@ func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOp } // consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5377,7 +5377,7 @@ var coderBytesValidateUTF8 = pointerCoderFuncs{ // sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. // The zero value is not encoded. 
-func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() if len(v) == 0 { return 0 @@ -5387,7 +5387,7 @@ func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendBytesNoZero wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5399,13 +5399,13 @@ func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) // consumeBytesNoZero wire decodes a []byte pointer as a Bytes. // The zero value is not decoded. -func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(([]byte)(nil), v...) out.n = n @@ -5421,7 +5421,7 @@ var coderBytesNoZero = pointerCoderFuncs{ // appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5435,13 +5435,13 @@ func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5459,7 +5459,7 @@ var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ } // sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. -func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BytesSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5468,7 +5468,7 @@ func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. 
-func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5478,14 +5478,14 @@ func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BytesSlice() if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n @@ -5500,7 +5500,7 @@ var coderBytesSlice = pointerCoderFuncs{ } // appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5513,18 +5513,18 @@ func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mars } // consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BytesSlice() +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} } + sp := p.BytesSlice() *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n return out, nil @@ -5538,25 +5538,25 @@ var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ } // sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. -func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.Bytes())) } // appendBytesValue encodes a []byte value as a Bytes. -func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendBytes(b, v.Bytes()) return b, nil } // consumeBytesValue decodes a []byte value as a Bytes. 
-func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil @@ -5570,7 +5570,7 @@ var coderBytesValue = valueCoderFuncs{ } // sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. -func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5580,7 +5580,7 @@ func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. -func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5591,14 +5591,14 @@ func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. 
-func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) out.n = n diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 35a67c25b..c1245fef4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -5,11 +5,11 @@ package impl import ( - "errors" "reflect" "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -117,7 +117,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -126,15 +126,15 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) @@ -143,7 +143,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } key = v n = o.n - case 2: + case genid.MapEntry_Value_field_number: var v pref.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) @@ -156,7 +156,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err @@ -174,7 +174,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -183,10 +183,10 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -207,7 +207,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi var v []byte v, n = protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var o unmarshalOutput o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) @@ -220,7 +220,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp 
protowire.Type, mapi if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 0e176d565..cd40527ff 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -11,7 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/order" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -27,6 +27,7 @@ type coderMessageInfo struct { coderFields map[protowire.Number]*coderFieldInfo sizecacheOffset offset unknownOffset offset + unknownPtrKind bool extensionOffset offset needsInitCheck bool isMessageSet bool @@ -47,9 +48,20 @@ type coderFieldInfo struct { } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { - mi.sizecacheOffset = si.sizecacheOffset - mi.unknownOffset = si.unknownOffset - mi.extensionOffset = si.extensionOffset + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } mi.coderFields = make(map[protowire.Number]*coderFieldInfo) fields := mi.Desc.Fields() @@ -73,6 +85,27 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { var funcs pointerCoderFuncs var childMessage *MessageInfo switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } case isOneof: fieldOffset = offsetOf(fs, mi.Exporter) case fd.IsWeak(): @@ -136,7 +169,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { sort.Slice(mi.orderedCoderFields, func(i, j int) bool { fi := fields.ByNumber(mi.orderedCoderFields[i].num) fj := fields.ByNumber(mi.orderedCoderFields[j].num) - return fieldsort.Less(fi, fj) + return order.LegacyFieldOrder(fi, fj) }) } @@ -157,3 +190,28 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.methods.Merge = mi.merge } } + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. +func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. +func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index cfb68e12f..b7a23faf1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -29,8 +29,9 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } - unknown := *p.Apply(mi.unknownOffset).Bytes() - size += messageset.SizeUnknown(unknown) + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } return size } @@ -69,10 +70,12 @@ func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions } } - unknown := *p.Apply(mi.unknownOffset).Bytes() - b, err := messageset.AppendUnknown(b, unknown) - if err != nil { - return b, err + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } } return b, nil @@ -100,13 +103,13 @@ func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOpt *ep = make(map[int32]ExtensionField) } ext := *ep - unknown := p.Apply(mi.unknownOffset).Bytes() initialized := true err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) if err == errUnknown { - *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) - 
*unknown = append(*unknown, v...) + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) return nil } if !o.initialized { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 86f7dc3c9..90705e3ae 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -30,7 +30,7 @@ func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } p.v.Elem().SetInt(int64(v)) out.n = n @@ -130,12 +130,12 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) @@ -150,7 +150,7 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 36a90dff3..acd61bb50 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -423,6 +423,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } if m, ok := v.Interface().(pref.ProtoMessage); ok { return pref.ValueOfMessage(m.ProtoReflect()) } @@ -437,6 +444,16 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } if rv.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) } @@ -451,6 +468,9 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } return rv.Type() == c.goType } @@ -459,9 +479,18 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { } func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } return c.PBValueOf(reflect.New(c.goType.Elem())) } func (c *messageConverter) Zero() pref.Value { return c.PBValueOf(reflect.Zero(c.goType)) } + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. 
+func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 85ba1d3b3..949dc49a6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -17,6 +17,8 @@ import ( piface "google.golang.org/protobuf/runtime/protoiface" ) +var errDecode = errors.New("cannot parse invalid wire-format data") + type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags resolver interface { @@ -100,13 +102,13 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. var n int tag, n = protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } b = b[n:] } var num protowire.Number if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, errors.New("invalid field number") + return out, errDecode } else { num = protowire.Number(n) } @@ -114,7 +116,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if wtyp == protowire.EndGroupType { if num != groupTag { - return out, errors.New("mismatching end group marker") + return out, errDecode } groupTag = 0 break @@ -170,10 +172,10 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. } n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { - u := p.Apply(mi.unknownOffset).Bytes() + u := mi.mutableUnknownBytes(p) *u = protowire.AppendTag(*u, num, wtyp) *u = append(*u, b[:n]...) } @@ -181,7 +183,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. b = b[n:] } if groupTag != 0 { - return out, errors.New("missing end group marker") + return out, errDecode } if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { initialized = false @@ -221,7 +223,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, nil } case ValidationInvalid: - return out, errors.New("invalid wire format") + return out, errDecode case ValidationUnknown: } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 8c8a794c6..845c67d6e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -79,8 +79,9 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int size += f.funcs.size(fptr, f, opts) } if mi.unknownOffset.IsValid() { - u := *p.Apply(mi.unknownOffset).Bytes() - size += len(u) + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } } if mi.sizecacheOffset.IsValid() { if size > math.MaxInt32 { @@ -141,8 +142,9 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt } } if mi.unknownOffset.IsValid() && !mi.isMessageSet { - u := *p.Apply(mi.unknownOffset).Bytes() - b = append(b, u...) + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) 
+ } } return b, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index c3d741c2f..e3fb0b578 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -30,7 +30,7 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), name) + return legacyLoadMessageType(reflect.TypeOf(m), name) } // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 61757ce50..49e723161 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -154,7 +154,8 @@ func (x placeholderExtension) Number() pref.FieldNumber { retu func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } func (x placeholderExtension) Kind() pref.Kind { return 0 } func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } func (x placeholderExtension) HasPresence() bool { return false } func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 06c68e117..029feeefd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -24,14 +24,24 @@ import ( // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. func legacyWrapMessage(v reflect.Value) pref.Message { - typ := v.Type() - if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} } - mt := legacyLoadMessageInfo(typ, "") + mt := legacyLoadMessageInfo(t, "") return mt.MessageOf(v.Interface()) } +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. 
+func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, @@ -49,8 +59,9 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { GoReflectType: t, } + var hasMarshal, hasUnmarshal bool v := reflect.Zero(t).Interface() - if _, ok := v.(legacyMarshaler); ok { + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { mi.methods.Marshal = legacyMarshal // We have no way to tell whether the type's Marshal method @@ -59,10 +70,10 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // calling Marshal methods when present. mi.methods.Flags |= piface.SupportMarshalDeterministic } - if _, ok := v.(legacyUnmarshaler); ok { + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal } - if _, ok := v.(legacyMerger); ok { + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { mi.methods.Merge = legacyMerge } @@ -75,7 +86,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor // LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which must be a *struct kind and not implement the v2 API already. +// which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { @@ -114,17 +125,19 @@ func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescr // If the Go type has no fields, then this might be a proto3 empty message // from before the size cache was added. If there are any fields, check to // see that at least one of them looks like something we generated. 
- if nfield := t.Elem().NumField(); nfield > 0 { - hasProtoField := false - for i := 0; i < nfield; i++ { - f := t.Elem().Field(i) - if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { - hasProtoField = true - break + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) } - } - if !hasProtoField { - return aberrantLoadMessageDesc(t, name) } } @@ -370,7 +383,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var legacyProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &piface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -401,18 +414,47 @@ func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) + if !ok { + return piface.MergeOutput{} + } + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { return piface.MergeOutput{} } - merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return piface.MergeOutput{Flags: piface.MergeComplete} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } return piface.MergeOutput{Flags: piface.MergeComplete} } @@ -422,6 +464,9 @@ type aberrantMessageType struct { } func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) Zero() pref.Message { @@ -443,6 +488,17 @@ type aberrantMessage struct { v reflect.Value } +// Reset implements the v1 proto.Message.Reset method. 
+func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + func (m aberrantMessage) ProtoReflect() pref.Message { return m } @@ -454,33 +510,40 @@ func (m aberrantMessage) Type() pref.MessageType { return aberrantMessageType{m.v.Type()} } func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } return aberrantMessage{reflect.Zero(m.v.Type())} } func (m aberrantMessage) Interface() pref.ProtoMessage { return m } func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return } func (m aberrantMessage) Has(pref.FieldDescriptor) bool { - panic("invalid field descriptor") + return false } func (m aberrantMessage) Clear(pref.FieldDescriptor) { - panic("invalid field descriptor") + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { - panic("invalid field descriptor") + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { - panic("invalid oneof descriptor") + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) GetUnknown() pref.RawFields { return nil @@ -489,13 +552,13 @@ func (m aberrantMessage) SetUnknown(pref.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { - // An invalid message is a read-only, empty message. Since we don't know anything - // about the alleged contents of this message, we can't say with confidence that - // it is invalid in this sense. Therefore, report it as valid. - return true + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false } func (m aberrantMessage) ProtoMethods() *piface.Methods { - return legacyProtoMethods + return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { return m.v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index cdc4267df..c65bbc044 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -77,9 +77,9 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } if mi.unknownOffset.IsValid() { - du := dst.Apply(mi.unknownOffset).Bytes() - su := src.Apply(mi.unknownOffset).Bytes() - if len(*su) > 0 { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) *du = append(*du, *su...) 
} } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 7dd994bd9..a104e28e8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -12,9 +12,10 @@ import ( "sync" "sync/atomic" - "google.golang.org/protobuf/internal/genname" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -109,22 +110,29 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { type ( SizeCache = int32 WeakFields = map[int32]protoreflect.ProtoMessage - UnknownFields = []byte + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte ExtensionFields = map[int32]ExtensionField ) var ( sizecacheType = reflect.TypeOf(SizeCache(0)) weakFieldsType = reflect.TypeOf(WeakFields(nil)) - unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) ) type structInfo struct { sizecacheOffset offset + sizecacheType reflect.Type weakOffset offset + weakType reflect.Type unknownOffset offset + unknownType reflect.Type extensionOffset offset + extensionType reflect.Type fieldsByNumber map[pref.FieldNumber]reflect.StructField oneofsByName map[pref.Name]reflect.StructField @@ -148,21 +156,25 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { fieldLoop: for i := 0; i < t.NumField(); i++ { switch f := t.Field(i); f.Name { - case genname.SizeCache, genname.SizeCacheA: + case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type } - case genname.WeakFields, genname.WeakFieldsA: + case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type } - case genname.UnknownFields, genname.UnknownFieldsA: - if f.Type == unknownFieldsType { + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type } - case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB: + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type } default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { @@ -212,4 +224,53 @@ func (mi *MessageInfo) New() protoreflect.Message { func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) } -func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd 
:= mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 0f4b8db76..9488b7261 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" + "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -16,6 +17,11 @@ type reflectMessageInfo struct { fields map[pref.FieldNumber]*fieldInfo oneofs map[pref.Name]*oneofInfo + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. + fieldTypes map[pref.FieldNumber]interface{} + // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) // It provides faster access to the fieldInfo, but may be incomplete. 
@@ -36,6 +42,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { mi.makeKnownFieldsFunc(si) mi.makeUnknownFieldsFunc(t, si) mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) } // makeKnownFieldsFunc generates functions for operations that can be performed @@ -51,17 +58,23 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } var fi fieldInfo switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) case fd.IsMap(): fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) case fd.IsWeak(): fi = fieldInfoForWeakMessage(fd, si.weakOffset) - case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: fi = fieldInfoForScalar(fd, fs, mi.Exporter) @@ -92,27 +105,53 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { i++ } } + + // Introduce instability to iteration order, but keep it deterministic. + if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } } func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { - mi.getUnknown = func(pointer) pref.RawFields { return nil } - mi.setUnknown = func(pointer, pref.RawFields) { return } - if si.unknownOffset.IsValid() { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. mi.getUnknown = func(p pointer) pref.RawFields { if p.IsNil() { return nil } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - return pref.RawFields(*rv.Interface().(*[]byte)) + return *p.Apply(mi.unknownOffset).Bytes() } mi.setUnknown = func(p pointer, b pref.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - *rv.Interface().(*[]byte) = []byte(b) + *p.Apply(mi.unknownOffset).Bytes() = b } - } else { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. 
+ mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: mi.getUnknown = func(pointer) pref.RawFields { return nil } @@ -139,6 +178,58 @@ func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { } } } +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} type extensionMap map[int32]ExtensionField @@ -306,7 +397,6 @@ var ( // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { - // TODO: Switch the input to be an opaque Pointer. if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -320,6 +410,17 @@ func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { func (m *messageReflectWrapper) pointer() pointer { return m.p } func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } +// Reset implements the v1 proto.Message.Reset method. 
+func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} func (m *messageIfaceWrapper) ProtoReflect() pref.Message { return (*messageReflectWrapper)(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 23124a86e..343cf8721 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -28,6 +28,39 @@ type fieldInfo struct { newField func() pref.Value } +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { @@ -97,7 +130,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv.Set(reflect.New(ot)) } rv = rv.Elem().Elem().Field(0) - if rv.IsNil() { + if rv.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) @@ -225,7 +258,10 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { - panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. 
+ nullable = false } if ft.Kind() == reflect.Ptr { ft = ft.Elem() @@ -388,6 +424,9 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo return false } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } return !rv.IsNil() }, clear: func(p pointer) { @@ -404,13 +443,13 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo set: func(p pointer, v pref.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, mutable: func(p pointer) pref.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) @@ -464,3 +503,41 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } return oi } + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 67b4ede67..9e3ed821e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -121,6 +121,7 @@ func (p pointer) String() *string { return p.v.Interface().(*string) } func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } func (p pointer) Extensions() *map[int32]ExtensionField { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 088aa85d4..9ecf23a85 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -109,6 +109,7 @@ func (p pointer) String() 
*string { return (*string)(p.p) func (p pointer) StringPtr() **string { return (**string)(p.p) } func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 57de9cc85..08cfb6054 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -14,6 +14,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -282,9 +283,9 @@ State: switch { case st.typ == validationTypeMap: switch num { - case 1: + case genid.MapEntry_Key_field_number: vi.typ = st.keyType - case 2: + case genid.MapEntry_Value_field_number: vi.typ = st.valType vi.mi = st.mi vi.requiredBit = 1 diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go deleted file mode 100644 index a3de1cf32..000000000 --- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mapsort provides sorted access to maps. -package mapsort - -import ( - "sort" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Range iterates over every map entry in sorted key order, -// calling f for each key and value encountered. -func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - var keys []protoreflect.MapKey - mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { - keys = append(keys, key) - return true - }) - sort.Slice(keys, func(i, j int) bool { - switch keyKind { - case protoreflect.BoolKind: - return !keys[i].Bool() && keys[j].Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return keys[i].Int() < keys[j].Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, - protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return keys[i].Uint() < keys[j].Uint() - case protoreflect.StringKind: - return keys[i].String() < keys[j].String() - default: - panic("invalid kind: " + keyKind.String()) - } - }) - for _, key := range keys { - if !f(key, mapv.Get(key)) { - break - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 000000000..2a24953f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. + if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. + IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. +// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y pref.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. + GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 000000000..c8090e0c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. 
+package order + +import ( + "sort" + "sync" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd pref.FieldDescriptor + v pref.Value +} + +var messageFieldPool = sync.Pool{ + New: func() interface{} { return new([]messageField) }, +} + +type ( + // FieldRanger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called every time a message field is visited. + VisitField = func(pref.FieldDescriptor, pref.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. + fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. + for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k pref.MapKey + v pref.Value +} + +var mapEntryPool = sync.Pool{ + New: func() interface{} { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all entries in a map. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(pref.MapKey, pref.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. + es.Range(func(k pref.MapKey, v pref.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 6b3001c66..14e774fb2 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,8 +52,8 @@ import ( // 10. Send out the CL for review and submit it.
const ( Major = 1 - Minor = 24 - Patch = 0 + Minor = 27 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 4974b16d5..49f9b8c88 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" @@ -44,12 +45,14 @@ type UnmarshalOptions struct { } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { _, err := o.unmarshal(b, m.ProtoReflect()) return err @@ -115,10 +118,10 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) // Parse the tag (field number and wire type). num, wtyp, tagLen := protowire.ConsumeTag(b) if tagLen < 0 { - return protowire.ParseError(tagLen) + return errDecode } if num > protowire.MaxValidNumber { - return errors.New("invalid field number") + return errDecode } // Find the field descriptor for this field number. @@ -158,7 +161,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) } valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) if valLen < 0 { - return protowire.ParseError(valLen) + return errDecode } if !o.DiscardUnknown { m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) @@ -193,7 +196,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto } b, n = protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } var ( keyField = fd.MapKey() @@ -212,21 +215,21 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if num > protowire.MaxValidNumber { - return 0, errors.New("invalid field number") + return 0, errDecode } b = b[n:] err = errUnknown switch num { - case 1: + case genid.MapEntry_Key_field_number: key, n, err = o.unmarshalScalar(b, wtyp, keyField) if err != nil { break } haveKey = true - case 2: + case genid.MapEntry_Value_field_number: var v protoreflect.Value v, n, err = o.unmarshalScalar(b, wtyp, valField) if err != nil { @@ -245,7 +248,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } } else if err != nil { return 0, err @@ -271,3 +274,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto // to the unknown field set of a message. It is never returned from an exported // function. 
var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go index d6dc904dc..301eeb20f 100644 --- a/vendor/google.golang.org/protobuf/proto/decode_gen.go +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -27,7 +27,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil case protoreflect.EnumKind: @@ -36,7 +36,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil case protoreflect.Int32Kind: @@ -45,7 +45,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Sint32Kind: @@ -54,7 +54,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil case protoreflect.Uint32Kind: @@ -63,7 +63,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.Int64Kind: @@ -72,7 +72,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Sint64Kind: @@ -81,7 +81,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil case protoreflect.Uint64Kind: @@ -90,7 +90,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.Sfixed32Kind: @@ -99,7 +99,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Fixed32Kind: @@ -108,7 +108,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.FloatKind: @@ -117,7 +117,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp 
protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil case protoreflect.Sfixed64Kind: @@ -126,7 +126,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Fixed64Kind: @@ -135,7 +135,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.DoubleKind: @@ -144,7 +144,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil case protoreflect.StringKind: @@ -153,7 +153,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) @@ -165,7 +165,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil case protoreflect.MessageKind: @@ -174,7 +174,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil case protoreflect.GroupKind: @@ -183,7 +183,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil default: @@ -197,12 +197,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) @@ -214,7 +214,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) return n, nil @@ -222,12 +222,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] 
list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) @@ -239,7 +239,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) return n, nil @@ -247,12 +247,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -264,7 +264,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -272,12 +272,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) @@ -289,7 +289,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) return n, nil @@ -297,12 +297,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -314,7 +314,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -322,12 +322,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -339,7 +339,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -347,12 +347,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := 
protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) @@ -364,7 +364,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) return n, nil @@ -372,12 +372,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -389,7 +389,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -397,12 +397,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -414,7 +414,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -422,12 +422,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -439,7 +439,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -447,12 +447,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) @@ -464,7 +464,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) return n, nil @@ -472,12 +472,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, 
protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -489,7 +489,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -497,12 +497,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -514,7 +514,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -522,12 +522,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) @@ -539,7 +539,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) return n, nil @@ -549,7 +549,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return 0, errors.InvalidUTF8(string(fd.FullName())) @@ -562,7 +562,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) return n, nil @@ -572,7 +572,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { @@ -586,7 +586,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 7b47a1180..d18239c23 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,9 @@ package proto import ( - "sort" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - 
"google.golang.org/protobuf/internal/fieldsort" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" @@ -211,14 +208,15 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] if messageset.IsMessageSet(m.Descriptor()) { return o.marshalMessageSet(b, m) } - // There are many choices for what order we visit fields in. The default one here - // is chosen for reasonable efficiency and simplicity given the protoreflect API. - // It is not deterministic, since Message.Range does not return fields in any - // defined order. - // - // When using deterministic serialization, we sort the known fields. + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalField(b, fd, v) return err == nil }) @@ -229,27 +227,6 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] return b, nil } -// rangeFields visits fields in a defined order when deterministic serialization is enabled. -func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if !o.Deterministic { - m.Range(f) - return - } - var fds []protoreflect.FieldDescriptor - m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - fds = append(fds, fd) - return true - }) - sort.Slice(fds, func(a, b int) bool { - return fieldsort.Less(fds[a], fds[b]) - }) - for _, fd := range fds { - if !f(fd, m.Get(fd)) { - break - } - } -} - func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { switch { case fd.IsList(): @@ -292,8 +269,12 @@ func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, l func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { keyf := fd.MapKey() valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } var err error - o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) var pos int b, pos = appendSpeculativeLength(b) @@ -312,14 +293,6 @@ func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, ma return b, err } -func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - if !o.Deterministic { - mapv.Range(f) - return - } - mapsort.Range(mapv, kind, f) -} - // When encoding length-prefixed fields, we speculatively set aside some number of bytes // for the length, encode the data, and then encode the length (shifting the data if necessary // to make room). 
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 10902bd85..4dba2b969 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -111,18 +111,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { // equalValue compares two singular values. func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.Message() != nil: - return equalMessage(x.Message(), y.Message()) - case fd.Kind() == pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() } diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 1d692c3a8..312d5d45c 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -28,8 +29,12 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b if !flags.ProtoLegacy { return b, errors.New("no support for message_set_wire_format") } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalMessageSetField(b, fd, v) return err == nil }) diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index ca14b09c3..1f0d183b1 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -32,3 +32,12 @@ var Error error func init() { Error = errors.Error } + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000..e4dfb1205 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. 
+// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. + f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. 
+ if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. + // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. 
+func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. + delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000..37efda1af --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + 
if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods 
[]*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. 
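+ // For example, a value named "METERS" in enum "acme.Unit" is placed at
+ // "acme.METERS" rather than "acme.Unit.METERS".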
+ var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000..cebb36cda --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. 
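+// Lookups consult the local declarations of the file under construction
+// before falling back to the remote resolver.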
+type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. 
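+// When the reference cannot be resolved and unresolvable or weak references
+// are permitted, placeholder descriptors are returned instead of an error.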
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
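+ // Following the example above, a failed lookup of "fizz.buzz.Foo.Bar"
+ // is retried as "fizz.Foo.Bar" and finally as "Foo.Bar".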
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
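+// For example, ".foo.Bar" becomes "foo.Bar", while a relative "Bar" becomes "*.Bar".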
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000..9af1d5648 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,374 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
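+ // For example, in an enum named Foo, the values FOO_BAZ and BAZ reduce to
+ // the same name once the prefix is stripped, and conflict if their numbers differ.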
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
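+ // For example, the fields "foo_bar" and "foobar" both normalize to "foobar"
+ // and therefore conflict.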
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
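+ // Only that auto-derived camel-cased value is tolerated here; any other
+ // explicitly set JSON name on an extension is rejected.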
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. 
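+// For example, a proto2 field declared as "optional group Result = 1 { ... }"
+// produces a message named "Result" and a field named "result" in the same
+// scope, which is the shape verified below.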
+func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000..a7c5ceffc --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,252 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. +func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
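+ // For extensions, emit the camel-cased field name, mirroring that historical
+ // output; for regular fields, emit the field's JSON name.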
+ if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. +func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. 
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index b669a4e76..dd85915bd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -128,7 +128,6 @@ package protoreflect import ( "fmt" - "regexp" "strings" "google.golang.org/protobuf/encoding/protowire" @@ -408,19 +407,14 @@ type EnumRanges interface { doNotImplement } -var ( - regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`) - regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`) -) - // Name is the short name for a proto declaration. This is not the name // as used in Go source code, which might not be identical to the proto name. type Name string // e.g., "Kind" -// IsValid reports whether n is a syntactically valid name. +// IsValid reports whether s is a syntactically valid name. // An empty name is invalid. -func (n Name) IsValid() bool { - return regexName.MatchString(string(n)) +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) } // Names represent a list of names. @@ -443,10 +437,42 @@ type Names interface { // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" -// IsValid reports whether n is a syntactically valid full name. +// IsValid reports whether s is a syntactically valid full name. // An empty full name is invalid. -func (n FullName) IsValid() bool { - return regexFullName.MatchString(string(n)) +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') } // Name returns the short name, which is the last identifier segment. diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 32ea3d98c..121ba3a07 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -4,6 +4,10 @@ package protoreflect +import ( + "strconv" +) + // SourceLocations is a list of source locations. 
type SourceLocations interface { // Len reports the number of source locations in the proto file. @@ -11,9 +15,20 @@ type SourceLocations interface { // Get returns the ith SourceLocation. It panics if out of bounds. Get(int) SourceLocation - doNotImplement + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation - // TODO: Add ByPath and ByDescriptor helper methods. + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement } // SourceLocation describes a source location and @@ -39,6 +54,10 @@ type SourceLocation struct { LeadingComments string // TrailingComments is the trailing attached comment for the declaration. TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int } // SourcePath identifies part of a file descriptor for a source location. @@ -48,5 +67,62 @@ type SourceLocation struct { // See google.protobuf.SourceCodeInfo.Location.path. type SourcePath []int32 -// TODO: Add SourcePath.String method to pretty-print the path. For example: -// ".message_type[6].nested_type[15].field[3]" +// Equal reports whether p1 equals p2. +func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. +// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 000000000..b03c1223c --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Code generated by generate-protos. DO NOT EDIT. + +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = 
p.appendSingularField(b, "extendee", nil) + case 7: + b = p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, 
"start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) 
+ } + return b +} + +func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5be14a725..8e53c44a9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -232,11 +232,15 @@ type MessageDescriptor interface { type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } // MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. type MessageType interface { // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. New() Message // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. Zero() Message // Descriptor returns the message descriptor. @@ -245,6 +249,26 @@ type MessageType interface { Descriptor() MessageDescriptor } +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + // MessageDescriptors is a list of message declarations. type MessageDescriptors interface { // Len reports the number of messages. @@ -279,8 +303,15 @@ type FieldDescriptor interface { // JSONName reports the name used for JSON serialization. 
// It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. JSONName() string + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + // HasPresence reports whether the field distinguishes between unpopulated // and default values. HasPresence() bool @@ -371,6 +402,9 @@ type FieldDescriptors interface { // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. // It returns nil if not found. ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor // ByNumber returns the FieldDescriptor for a field numbered n. // It returns nil if not found. ByNumber(n FieldNumber) FieldDescriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 5e5f96716..59f024c44 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -17,24 +17,49 @@ package protoregistry import ( "fmt" - "log" + "os" "strings" "sync" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" ) +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + // ignoreConflict reports whether to ignore a registration conflict // given the descriptor being registered and the error. // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { - log.Printf(""+ - "WARNING: %v\n"+ - "A future release will panic on registration conflicts. See:\n"+ - "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ - "\n", err) - return true + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } } var globalMutex sync.RWMutex @@ -69,7 +94,8 @@ type Files struct { // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
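A minimal usage sketch for the TextName/ByTextName accessors documented above, assuming the durationpb well-known type from this module is importable; it is illustrative only:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Fields() comes from the MessageDescriptor of any generated message.
	fields := (&durationpb.Duration{}).ProtoReflect().Descriptor().Fields()

	// For ordinary (non-group, non-extension) fields the text name is simply
	// the proto field name, so this resolves the same descriptor as ByName.
	fd := fields.ByTextName("seconds")
	fmt.Println(fd.TextName(), fd.JSONName(), fd.Number()) // seconds seconds 1
}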
descsByName map[protoreflect.FullName]interface{} - filesByPath map[string]protoreflect.FileDescriptor + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int } type packageDescriptor struct { @@ -92,48 +118,16 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { r.descsByName = map[protoreflect.FullName]interface{}{ "": &packageDescriptor{}, } - r.filesByPath = make(map[string]protoreflect.FileDescriptor) + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) } path := file.Path() - if prev := r.filesByPath[path]; prev != nil { - // TODO: Remove this after some soak-in period after moving these types. - var prevPath string - const prevModule = "google.golang.org/genproto" - const prevVersion = "cb27e3aa (May 26th, 2020)" - switch path { - case "google/protobuf/field_mask.proto": - prevPath = prevModule + "/protobuf/field_mask" - case "google/protobuf/api.proto": - prevPath = prevModule + "/protobuf/api" - case "google/protobuf/type.proto": - prevPath = prevModule + "/protobuf/ptype" - case "google/protobuf/source_context.proto": - prevPath = prevModule + "/protobuf/source_context" - } - if r == GlobalFiles && prevPath != "" { - pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") - pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" - currPath := "google.golang.org/protobuf/types/known/" + pkgName - panic(fmt.Sprintf(""+ - "duplicate registration of %q\n"+ - "\n"+ - "The generated definition for this file has moved:\n"+ - "\tfrom: %q\n"+ - "\tto: %q\n"+ - "A dependency on the %q module must\n"+ - "be at version %v or higher.\n"+ - "\n"+ - "Upgrade the dependency by running:\n"+ - "\tgo get -u %v\n", - path, prevPath, currPath, prevModule, prevVersion, prevPath)) - } - + if prev := r.filesByPath[path]; len(prev) > 0 { + r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(file, err) { - err = nil + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err } - return err } for name := file.Package(); name != ""; name = name.Parent() { @@ -174,10 +168,52 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { r.descsByName[d.FullName()] = d }) - r.filesByPath[path] = file + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ return nil } +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
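A hedged illustration of the duplicate-path behaviour above: a locally constructed Files always reports the conflict error, and only GlobalFiles consults conflictPolicy / GOLANG_PROTOBUF_REGISTRATION_CONFLICT. durationpb is assumed to be importable:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var files protoregistry.Files // the zero value is ready to use
	fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()

	fmt.Println(files.RegisterFile(fd)) // <nil>

	// Registering the same path again is reported as a conflict for a local
	// registry; the policy switch only applies to GlobalFiles.
	fmt.Println(files.RegisterFile(fd)) // file "google/protobuf/duration.proto" is already registered ...
	fmt.Println(files.NumFiles())       // 1
}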
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + // FindDescriptorByName looks up a descriptor by the full name. // // This returns (nil, NotFound) if not found. @@ -273,6 +309,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // // This returns (nil, NotFound) if not found. +// This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { return nil, NotFound @@ -281,13 +318,19 @@ func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) globalMutex.RLock() defer globalMutex.RUnlock() } - if fd, ok := r.filesByPath[path]; ok { - return fd, nil + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) } - return nil, NotFound } -// NumFiles reports the number of registered files. +// NumFiles reports the number of registered files, +// including duplicate files with the same name. func (r *Files) NumFiles() int { if r == nil { return 0 @@ -296,10 +339,11 @@ func (r *Files) NumFiles() int { globalMutex.RLock() defer globalMutex.RUnlock() } - return len(r.filesByPath) + return r.numFiles } // RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. // The iteration order is undefined. func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { if r == nil { @@ -309,9 +353,11 @@ func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { globalMutex.RLock() defer globalMutex.RUnlock() } - for _, file := range r.filesByPath { - if !f(file) { - return + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } } } } @@ -560,13 +606,25 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp return nil, NotFound } -// FindMessageByName looks up a message by its full name. -// E.g., "google.protobuf.Any" +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". // -// This return (nil, NotFound) if not found. +// This returns (nil, NotFound) if not found. 
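A small sketch of the lookup entry points described above; it assumes that importing a generated package (durationpb here) registers its types with the global registries as a side effect:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Duration

	// FindMessageByURL accepts Any-style type URLs; it truncates anything
	// before and including '/' in the URL.
	mt2, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt2.Descriptor().FullName()) // google.protobuf.Duration
}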
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - // The full name by itself is a valid URL. - return r.FindMessageByURL(string(message)) + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound } // FindMessageByURL looks up a message by a URL identifier. @@ -574,6 +632,8 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // // This returns (nil, NotFound) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. if r == nil { return nil, NotFound } @@ -613,6 +673,26 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E if xt, _ := v.(protoreflect.ExtensionType); xt != nil { return xt, nil } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. + if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) } return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 000000000..abe4ab511 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,3957 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. 
+var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +// Enum value maps for FieldDescriptorProto_Label. +var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. +var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. 
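A brief sketch of how the generated enum helpers in this file (the name/value maps and the Enum, String and Number methods) are commonly used; the output comments reflect the values defined above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	t := descriptorpb.FieldDescriptorProto_TYPE_ENUM
	fmt.Println(t.String(), int32(t.Number())) // TYPE_ENUM 14

	// The generated maps convert between wire numbers and identifiers.
	fmt.Println(descriptorpb.FieldDescriptorProto_Type_name[9])            // TYPE_STRING
	fmt.Println(descriptorpb.FieldDescriptorProto_Type_value["TYPE_BOOL"]) // 8

	// Enum() returns a pointer, which is how optional enum fields are set.
	lbl := descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum()
	fmt.Println(*lbl) // LABEL_REPEATED
}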
+var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
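To make the FileDescriptorProto shape above concrete, a sketch that converts a compiled-in descriptor back to this proto form; it assumes the protodesc package from this module and durationpb are importable:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	file := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()
	fdp := protodesc.ToFileDescriptorProto(file)

	fmt.Println(fdp.GetName())    // google/protobuf/duration.proto
	fmt.Println(fdp.GetPackage()) // google.protobuf
	fmt.Println(fdp.GetSyntax())  // proto3
	fmt.Println(len(fdp.GetMessageType()), len(fdp.GetDependency())) // 1 0
}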
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. 
However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. 
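A short sketch of reading the FieldDescriptorProto fields documented above from an existing descriptor; protodesc.ToFieldDescriptorProto and durationpb are assumed to be available:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().Fields().ByName("seconds")
	fdp := protodesc.ToFieldDescriptorProto(fd)

	fmt.Println(fdp.GetName(), fdp.GetNumber()) // seconds 1
	fmt.Println(fdp.GetType(), fdp.GetLabel())  // TYPE_INT64 LABEL_OPTIONAL

	// proto3 fields only report proto3_optional when declared with "optional".
	fmt.Println(fdp.GetProto3Optional()) // false
}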
+type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. 
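A sketch of the EnumDescriptorProto and EnumValueDescriptorProto accessors above applied to a well-known enum; protodesc and structpb are assumed to be importable:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	ed := structpb.NullValue_NULL_VALUE.Descriptor()
	edp := protodesc.ToEnumDescriptorProto(ed)

	fmt.Println(edp.GetName()) // NullValue
	for _, v := range edp.GetValue() {
		fmt.Println(v.GetName(), v.GetNumber()) // NULL_VALUE 0
	}
}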
+type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. 
+const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Do not use. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
+func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func (x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type 
MessageOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MessageOptions fields. 
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. 
The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. 
+const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. +func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. 
+func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. +func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. 
+const ( + Default_EnumValueOptions_Deprecated = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. 
+func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. +const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. 
+type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. 
This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. 
+type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. +} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). 
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, + 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, + 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, + 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, + 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, + 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, + 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, + 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, + 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, + 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, + 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, + 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType + (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel + (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 16: google.protobuf.FileOptions + (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 22: 
google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation +} +var file_google_protobuf_descriptor_proto_depIdxs = []int32{ + 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 15, // 24: 
google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 43, // [43:43] is the sub-list for method output_type + 43, // [43:43] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 6, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, 
+ }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 5f9498e4e..8c10797b9 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -31,12 +31,100 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... 
// make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// package anypb import ( + proto "google.golang.org/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" + strings "strings" sync "sync" ) @@ -78,10 +166,13 @@ import ( // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } // ... // foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// if err := any.UnmarshalTo(foo); err != nil { // ... // } // @@ -158,6 +249,125 @@ type Any struct { Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. +func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. 
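For reference, a minimal sketch of how the new helpers combine, assuming a hypothetical generated package foopb whose MyMessage type is linked into the binary (and is therefore present in the global type registry):

    // roundTrip packs src into an Any, type-checks it, and unpacks it again.
    func roundTrip(src *foopb.MyMessage) (*foopb.MyMessage, error) {
        any, err := anypb.New(src) // records the type URL plus the serialized bytes
        if err != nil {
            return nil, err
        }
        dst := new(foopb.MyMessage)
        if !any.MessageIs(dst) { // cheap check against the recorded type URL
            return nil, fmt.Errorf("unexpected payload type %q", any.MessageName())
        }
        if err := any.UnmarshalTo(dst); err != nil { // resets dst, then unmarshals
            return nil, err
        }
        return dst, nil
    }

When the concrete type is not known ahead of time, any.UnmarshalNew() resolves it through the global registry instead, as the package comment above describes.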
+func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. 
+func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + func (x *Any) Reset() { *x = Any{} if protoimpl.UnsafeEnabled { @@ -213,14 +423,15 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 3997c604f..a583ca2f6 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -31,13 +31,58 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. 
To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// package durationpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" reflect "reflect" sync "sync" + time "time" ) // A Duration represents a signed, fixed-length span of time represented @@ -118,6 +163,91 @@ type Duration struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
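For reference, a minimal sketch of the round trip these helpers enable, assuming the usual fmt, log, and time imports alongside durationpb:

    d := 90 * time.Second
    dur := durationpb.New(d)           // &durationpb.Duration{Seconds: 90}
    if err := dur.CheckValid(); err != nil {
        log.Fatal(err)                 // catches nil, out-of-range, and mixed-sign values
    }
    fmt.Println(dur.AsDuration() == d) // true; AsDuration only clamps on overflow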
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + func (x *Duration) Reset() { *x = Duration{} if protoimpl.UnsafeEnabled { @@ -173,16 +303,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, - 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git 
a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 6fe6d42f1..c9ae92132 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -31,6 +31,48 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +// package timestamppb import ( @@ -38,6 +80,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + time "time" ) // A Timestamp represents a point in time independent of any time zone or local @@ -91,7 +134,16 @@ import ( // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // -// Example 5: Compute Timestamp from current time in Python. +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. // // timestamp = Timestamp() // timestamp.GetCurrentTime() @@ -140,6 +192,73 @@ type Timestamp struct { Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` } +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. 
+// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. +func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + func (x *Timestamp) Reset() { *x = Timestamp{} if protoimpl.UnsafeEnabled { @@ -196,15 +315,15 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 
055480b9e..7348c50c0 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -11,6 +11,7 @@ go: - "1.11.x" - "1.12.x" - "1.13.x" + - "1.14.x" - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 1f7e87e67..acf71402c 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -79,6 +79,8 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { parser.encoding = encoding } +var disableLineWrapping = false + // Create a new emitter object. func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ @@ -87,6 +89,9 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), } + if disableLineWrapping { + emitter.best_width = -1 + } } // Destroy an emitter object. diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod deleted file mode 100644 index 1934e8769..000000000 --- a/vendor/gopkg.in/yaml.v2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v2" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 89650e293..30813884c 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -175,7 +175,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -464,3 +464,15 @@ func isZero(v reflect.Value) bool { } return false } + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. 
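For reference, a minimal sketch of the intended call pattern, assuming the standard fmt, log, and strings imports alongside gopkg.in/yaml.v2. FutureLineWrap runs once, before the first encode, so every emitter created afterwards keeps long scalar strings on a single line:

    yaml.FutureLineWrap() // must be called before the first Marshal or Encoder
    out, err := yaml.Marshal(map[string]string{
        "token": strings.Repeat("x", 200), // long scalars are no longer folded
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(string(out))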
+func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index adc47be7f..bce77c2a1 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -1858,10 +1858,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1978,10 +1975,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2098,10 +2092,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2280,10 +2271,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2398,10 +2386,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2568,10 +2553,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2943,10 +2925,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3063,10 +3042,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3183,10 +3159,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3339,10 +3312,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index 7f9772e7d..16ab9d5d6 100644 
--- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admissionregistration.v1; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index c98aa7477..d5e802f3b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -1859,10 +1859,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1979,10 +1976,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2099,10 +2093,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2281,10 +2272,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2399,10 +2387,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2569,10 +2554,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2944,10 +2926,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3064,10 +3043,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3184,10 +3160,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3340,10 +3313,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated 
- } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 70ffa9219..bdae74037 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.admissionregistration.v1beta1; diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go new file mode 100644 index 000000000..a4da95d44 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=internal.apiserver.k8s.io + +// Package v1alpha1 contains the v1alpha1 version of the API used by the +// apiservers themselves. +package v1alpha1 // import "k8s.io/api/apiserverinternal/v1alpha1" diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go new file mode 100644 index 000000000..ee12c7d0d --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go @@ -0,0 +1,1700 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} } +func (*ServerStorageVersion) ProtoMessage() {} +func (*ServerStorageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{0} +} +func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerStorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerStorageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerStorageVersion.Merge(m, src) +} +func (m *ServerStorageVersion) XXX_Size() int { + return m.Size() +} +func (m *ServerStorageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ServerStorageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo + +func (m *StorageVersion) Reset() { *m = StorageVersion{} } +func (*StorageVersion) ProtoMessage() {} +func (*StorageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{1} +} +func (m *StorageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersion.Merge(m, src) +} +func (m *StorageVersion) XXX_Size() int { + return m.Size() +} +func (m *StorageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersion proto.InternalMessageInfo + +func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} } +func (*StorageVersionCondition) ProtoMessage() {} +func (*StorageVersionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{2} +} +func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionCondition.Merge(m, src) +} +func (m *StorageVersionCondition) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo + +func (m *StorageVersionList) Reset() { *m = StorageVersionList{} } +func (*StorageVersionList) ProtoMessage() {} +func (*StorageVersionList) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{3} +} +func (m *StorageVersionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionList.Merge(m, src) +} +func (m *StorageVersionList) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionList) XXX_DiscardUnknown() { + 
xxx_messageInfo_StorageVersionList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo + +func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} } +func (*StorageVersionSpec) ProtoMessage() {} +func (*StorageVersionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{4} +} +func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionSpec.Merge(m, src) +} +func (m *StorageVersionSpec) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo + +func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} } +func (*StorageVersionStatus) ProtoMessage() {} +func (*StorageVersionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_a3903ff5e3cc7a03, []int{5} +} +func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionStatus.Merge(m, src) +} +func (m *StorageVersionStatus) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ServerStorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.ServerStorageVersion") + proto.RegisterType((*StorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersion") + proto.RegisterType((*StorageVersionCondition)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionCondition") + proto.RegisterType((*StorageVersionList)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionList") + proto.RegisterType((*StorageVersionSpec)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionSpec") + proto.RegisterType((*StorageVersionStatus)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_a3903ff5e3cc7a03) +} + +var fileDescriptor_a3903ff5e3cc7a03 = []byte{ + // 763 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 0xd2, 0x52, 0x60, 0xaa, 0x54, 0x46, 0x08, 0xb5, 0x26, 0x5b, 0x6c, 0xa2, 0x41, 0x8d, + 0xbb, 0xd2, 0x88, 0x91, 0x98, 0x68, 0x58, 0x20, 0x06, 0x03, 0x62, 0x06, 0xe2, 0x01, 0x3d, 0x38, + 0xdd, 0x1d, 0xb7, 0x6b, 0xbb, 0x3b, 0x9b, 0x9d, 0x69, 0x13, 0x2e, 0xc6, 0x8f, 0xe0, 0x07, 0xf1, + 0xe8, 0x87, 0xe0, 0x64, 0xb8, 0x98, 0x90, 0x98, 0x34, 0xb2, 0x7e, 0x0b, 0x4e, 0x66, 0x66, 0x77, + 0x5b, 0xb6, 0x2d, 0xb1, 0xe1, 0xb0, 0xc9, 0xce, 0x7b, 0xef, 0xf7, 0x7b, 0x7f, 0xe6, 0x37, 0x0f, + 0xbc, 0x69, 0x3e, 0x63, 0x9a, 0x43, 0xf5, 0x66, 0xbb, 0x4e, 0x02, 0x8f, 0x70, 0xc2, 0xf4, 0x0e, + 0xf1, 
0x2c, 0x1a, 0xe8, 0xb1, 0x03, 0xfb, 0x8e, 0xf8, 0x18, 0x09, 0x3a, 0x24, 0x70, 0x3c, 0x4e, + 0x02, 0x0f, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0xf9, 0x0d, 0xbc, 0xa2, 0xdb, 0xc4, 0x23, 0x01, 0xe6, + 0xc4, 0xd2, 0xfc, 0x80, 0x72, 0x0a, 0xef, 0x46, 0x30, 0x0d, 0xfb, 0x8e, 0x36, 0x04, 0xd3, 0x12, + 0x58, 0xf9, 0x91, 0xed, 0xf0, 0x46, 0xbb, 0xae, 0x99, 0xd4, 0xd5, 0x6d, 0x6a, 0x53, 0x5d, 0xa2, + 0xeb, 0xed, 0x4f, 0xf2, 0x24, 0x0f, 0xf2, 0x2f, 0x62, 0x2d, 0x3f, 0xe9, 0x17, 0xe3, 0x62, 0xb3, + 0xe1, 0x78, 0x24, 0x38, 0xd2, 0xfd, 0xa6, 0x2d, 0x2b, 0xd3, 0x5d, 0xc2, 0xb1, 0xde, 0x19, 0xaa, + 0xa5, 0xac, 0x5f, 0x86, 0x0a, 0xda, 0x1e, 0x77, 0x5c, 0x32, 0x04, 0x78, 0xfa, 0x3f, 0x00, 0x33, + 0x1b, 0xc4, 0xc5, 0x83, 0xb8, 0xea, 0x2f, 0x05, 0xcc, 0xef, 0xcb, 0x4e, 0xf7, 0x39, 0x0d, 0xb0, + 0x4d, 0xde, 0x91, 0x80, 0x39, 0xd4, 0x83, 0xab, 0xa0, 0x80, 0x7d, 0x27, 0x72, 0x6d, 0x6f, 0x96, + 0x94, 0x25, 0x65, 0x79, 0xc6, 0xb8, 0x79, 0xdc, 0xad, 0x64, 0xc2, 0x6e, 0xa5, 0xb0, 0xfe, 0x76, + 0x3b, 0x71, 0xa1, 0x8b, 0x71, 0x70, 0x1d, 0x14, 0x89, 0x67, 0x52, 0xcb, 0xf1, 0xec, 0x98, 0xa9, + 0x34, 0x21, 0xa1, 0x8b, 0x31, 0xb4, 0xb8, 0x95, 0x76, 0xa3, 0xc1, 0x78, 0xb8, 0x01, 0xe6, 0x2c, + 0x62, 0x52, 0x0b, 0xd7, 0x5b, 0x49, 0x35, 0xac, 0x94, 0x5d, 0xca, 0x2e, 0xcf, 0x18, 0x0b, 0x61, + 0xb7, 0x32, 0xb7, 0x39, 0xe8, 0x44, 0xc3, 0xf1, 0xd5, 0x1f, 0x13, 0x60, 0x76, 0xa0, 0xa3, 0x8f, + 0x60, 0x5a, 0x8c, 0xdb, 0xc2, 0x1c, 0xcb, 0x76, 0x0a, 0xb5, 0xc7, 0x5a, 0xff, 0xca, 0x7b, 0x53, + 0xd3, 0xfc, 0xa6, 0x2d, 0xef, 0x5f, 0x13, 0xd1, 0x5a, 0x67, 0x45, 0xdb, 0xab, 0x7f, 0x26, 0x26, + 0xdf, 0x25, 0x1c, 0x1b, 0x30, 0xee, 0x02, 0xf4, 0x6d, 0xa8, 0xc7, 0x0a, 0xdf, 0x83, 0x1c, 0xf3, + 0x89, 0x29, 0x3b, 0x2e, 0xd4, 0xd6, 0xb4, 0xb1, 0x04, 0xa5, 0xa5, 0xcb, 0xdc, 0xf7, 0x89, 0x69, + 0x5c, 0x8b, 0xd3, 0xe4, 0xc4, 0x09, 0x49, 0x52, 0x68, 0x82, 0x3c, 0xe3, 0x98, 0xb7, 0xc5, 0x2c, + 0x04, 0xfd, 0xf3, 0xab, 0xd1, 0x4b, 0x0a, 0x63, 0x36, 0x4e, 0x90, 0x8f, 0xce, 0x28, 0xa6, 0xae, + 0x7e, 0xcf, 0x82, 0xc5, 0x34, 0x60, 0x83, 0x7a, 0x96, 0xc3, 0xc5, 0xfc, 0x5e, 0x82, 0x1c, 0x3f, + 0xf2, 0x49, 0x2c, 0x85, 0x87, 0x49, 0x89, 0x07, 0x47, 0x3e, 0x39, 0xef, 0x56, 0x6e, 0x5f, 0x02, + 0x13, 0x6e, 0x24, 0x81, 0x70, 0xad, 0xd7, 0x41, 0x24, 0x89, 0x3b, 0xe9, 0x22, 0xce, 0xbb, 0x95, + 0x62, 0x0f, 0x96, 0xae, 0x0b, 0xbe, 0x06, 0x90, 0xd6, 0x65, 0x87, 0xd6, 0xab, 0x48, 0xc1, 0x42, + 0x59, 0x62, 0x10, 0x59, 0xa3, 0x1c, 0xd3, 0xc0, 0xbd, 0xa1, 0x08, 0x34, 0x02, 0x05, 0x3b, 0x00, + 0xb6, 0x30, 0xe3, 0x07, 0x01, 0xf6, 0x58, 0x54, 0xa2, 0xe3, 0x92, 0x52, 0x4e, 0x0e, 0xf5, 0xc1, + 0x78, 0x8a, 0x10, 0x88, 0x7e, 0xde, 0x9d, 0x21, 0x36, 0x34, 0x22, 0x03, 0xbc, 0x07, 0xf2, 0x01, + 0xc1, 0x8c, 0x7a, 0xa5, 0x49, 0xd9, 0x7e, 0xef, 0x0e, 0x90, 0xb4, 0xa2, 0xd8, 0x0b, 0xef, 0x83, + 0x29, 0x97, 0x30, 0x86, 0x6d, 0x52, 0xca, 0xcb, 0xc0, 0x62, 0x1c, 0x38, 0xb5, 0x1b, 0x99, 0x51, + 0xe2, 0xaf, 0xfe, 0x54, 0x00, 0x4c, 0xcf, 0x7d, 0xc7, 0x61, 0x1c, 0x7e, 0x18, 0x52, 0xba, 0x36, + 0x5e, 0x5f, 0x02, 0x2d, 0x75, 0x7e, 0x23, 0x4e, 0x39, 0x9d, 0x58, 0x2e, 0xa8, 0xfc, 0x10, 0x4c, + 0x3a, 0x9c, 0xb8, 0xe2, 0x16, 0xb3, 0xcb, 0x85, 0xda, 0xea, 0x95, 0x74, 0x68, 0x5c, 0x8f, 0x33, + 0x4c, 0x6e, 0x0b, 0x2e, 0x14, 0x51, 0x56, 0xe7, 0x07, 0xfb, 0x11, 0x0f, 0xa0, 0xfa, 0x7b, 0x02, + 0xcc, 0x8f, 0x92, 0x31, 0xfc, 0x02, 0x8a, 0x2c, 0x65, 0x67, 0x25, 0x45, 0x16, 0x35, 0xf6, 0xe3, + 0x18, 0xb1, 0xfa, 0xfa, 0xab, 0x2a, 0x6d, 0x67, 0x68, 0x30, 0x19, 0xdc, 0x03, 0x0b, 0x26, 0x75, + 0x5d, 0xea, 0x6d, 0x8d, 0xdc, 0x79, 0xb7, 0xc2, 0x6e, 0x65, 0x61, 0x63, 0x54, 0x00, 0x1a, 0x8d, + 0x83, 0x01, 0x00, 0x66, 0xf2, 
0x04, 0xa2, 0xa5, 0x57, 0xa8, 0xbd, 0xb8, 0xd2, 0x80, 0x7b, 0x2f, + 0xa9, 0xbf, 0xb3, 0x7a, 0x26, 0x86, 0x2e, 0x64, 0x31, 0xb4, 0xe3, 0x33, 0x35, 0x73, 0x72, 0xa6, + 0x66, 0x4e, 0xcf, 0xd4, 0xcc, 0xd7, 0x50, 0x55, 0x8e, 0x43, 0x55, 0x39, 0x09, 0x55, 0xe5, 0x34, + 0x54, 0x95, 0x3f, 0xa1, 0xaa, 0x7c, 0xfb, 0xab, 0x66, 0x0e, 0xa7, 0x93, 0x3c, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xa1, 0x5f, 0xcf, 0x37, 0x78, 0x07, 0x00, 0x00, +} + +func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerStorageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerStorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DecodableVersions) > 0 { + for iNdEx := len(m.DecodableVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DecodableVersions[iNdEx]) + copy(dAtA[i:], m.DecodableVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DecodableVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.EncodingVersion) + copy(dAtA[i:], m.EncodingVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EncodingVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIServerID) + copy(dAtA[i:], m.APIServerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIServerID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StorageVersionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.CommonEncodingVersion != nil { + i -= len(*m.CommonEncodingVersion) + copy(dAtA[i:], *m.CommonEncodingVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CommonEncodingVersion))) + i-- + dAtA[i] = 0x12 + } + if len(m.StorageVersions) > 0 { + for iNdEx := len(m.StorageVersions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StorageVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return 
base +} +func (m *ServerStorageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIServerID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EncodingVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DecodableVersions) > 0 { + for _, s := range m.DecodableVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StorageVersionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StorageVersions) > 0 { + for _, e := range m.StorageVersions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.CommonEncodingVersion != nil { + l = len(*m.CommonEncodingVersion) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServerStorageVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerStorageVersion{`, + `APIServerID:` + fmt.Sprintf("%v", this.APIServerID) + `,`, + `EncodingVersion:` + fmt.Sprintf("%v", this.EncodingVersion) + `,`, + `DecodableVersions:` + fmt.Sprintf("%v", this.DecodableVersions) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersion{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionSpec", "StorageVersionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionStatus", "StorageVersionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + 
`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageVersion{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersion", "StorageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageVersionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionSpec{`, + `}`, + }, "") + return s +} +func (this *StorageVersionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForStorageVersions := "[]ServerStorageVersion{" + for _, f := range this.StorageVersions { + repeatedStringForStorageVersions += strings.Replace(strings.Replace(f.String(), "ServerStorageVersion", "ServerStorageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForStorageVersions += "}" + repeatedStringForConditions := "[]StorageVersionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StorageVersionCondition", "StorageVersionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StorageVersionStatus{`, + `StorageVersions:` + repeatedStringForStorageVersions + `,`, + `CommonEncodingVersion:` + valueToStringGenerated(this.CommonEncodingVersion) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServerStorageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerStorageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerStorageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIServerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.APIServerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncodingVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EncodingVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DecodableVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DecodableVersions = append(m.DecodableVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StorageVersionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageVersion{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
StorageVersions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersions = append(m.StorageVersions, ServerStorageVersion{}) + if err := m.StorageVersions[len(m.StorageVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonEncodingVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CommonEncodingVersion = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StorageVersionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + 
} + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto new file mode 100644 index 000000000..539c8c9e2 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -0,0 +1,121 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apiserverinternal.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// An API server instance reports the version it can decode and the version it +// encodes objects to when persisting objects in the backend. +message ServerStorageVersion { + // The ID of the reporting API server. + optional string apiServerID = 1; + + // The API server encodes the object to this version when persisting it in + // the backend (e.g., etcd). + optional string encodingVersion = 2; + + // The API server can decode objects encoded in these versions. + // The encodingVersion must be included in the decodableVersions. + // +listType=set + repeated string decodableVersions = 3; +} + +// Storage version of a specific resource. +message StorageVersion { + // The name is .. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is an empty spec. It is here to comply with Kubernetes API style. + optional StorageVersionSpec spec = 2; + + // API server instances report the version they can decode and the version they + // encode objects to when persisting objects in the backend. + optional StorageVersionStatus status = 3; +} + +// Describes the state of the storageVersion at a certain point. +message StorageVersionCondition { + // Type of the condition. + // +required + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + // +required + optional string status = 2; + + // If set, this represents the .metadata.generation that the condition was set based upon. + // +optional + optional int64 observedGeneration = 3; + + // Last time the condition transitioned from one status to another. 
+ // +required + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // The reason for the condition's last transition. + // +required + optional string reason = 5; + + // A human readable message indicating details about the transition. + // +required + optional string message = 6; +} + +// A list of StorageVersions. +message StorageVersionList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StorageVersion items = 2; +} + +// StorageVersionSpec is an empty spec. +message StorageVersionSpec { +} + +// API server instances report the versions they can decode and the version they +// encode objects to when persisting objects in the backend. +message StorageVersionStatus { + // The reported versions per API server instance. + // +optional + // +listType=map + // +listMapKey=apiServerID + repeated ServerStorageVersion storageVersions = 1; + + // If all API server instances agree on the same encoding storage version, + // then this field is set to that version. Otherwise this field is left empty. + // API servers should finish updating its storageVersionStatus entry before + // serving write operations, so that this field will be in sync with the reality. + // +optional + optional string commonEncodingVersion = 2; + + // The latest available observations of the storageVersion's state. + // +optional + // +listType=map + // +listMapKey=type + repeated StorageVersionCondition conditions = 3; +} + diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go similarity index 73% rename from vendor/k8s.io/api/settings/v1alpha1/register.go rename to vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go index eee278d95..5e46e8d9a 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "settings.k8s.io" +const GroupName = "internal.apiserver.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} @@ -34,18 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &PodPreset{}, - &PodPresetList{}, + &StorageVersion{}, + &StorageVersionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go new file mode 100644 index 000000000..880091b6f --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Storage version of a specific resource. +type StorageVersion struct { + metav1.TypeMeta `json:",inline"` + // The name is .. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is an empty spec. It is here to comply with Kubernetes API style. + Spec StorageVersionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // API server instances report the version they can decode and the version they + // encode objects to when persisting objects in the backend. + Status StorageVersionStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// StorageVersionSpec is an empty spec. +type StorageVersionSpec struct{} + +// API server instances report the versions they can decode and the version they +// encode objects to when persisting objects in the backend. +type StorageVersionStatus struct { + // The reported versions per API server instance. + // +optional + // +listType=map + // +listMapKey=apiServerID + StorageVersions []ServerStorageVersion `json:"storageVersions,omitempty" protobuf:"bytes,1,opt,name=storageVersions"` + // If all API server instances agree on the same encoding storage version, + // then this field is set to that version. Otherwise this field is left empty. + // API servers should finish updating its storageVersionStatus entry before + // serving write operations, so that this field will be in sync with the reality. + // +optional + CommonEncodingVersion *string `json:"commonEncodingVersion,omitempty" protobuf:"bytes,2,opt,name=commonEncodingVersion"` + + // The latest available observations of the storageVersion's state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []StorageVersionCondition `json:"conditions,omitempty" protobuf:"bytes,3,opt,name=conditions"` +} + +// An API server instance reports the version it can decode and the version it +// encodes objects to when persisting objects in the backend. +type ServerStorageVersion struct { + // The ID of the reporting API server. + APIServerID string `json:"apiServerID,omitempty" protobuf:"bytes,1,opt,name=apiServerID"` + + // The API server encodes the object to this version when persisting it in + // the backend (e.g., etcd). + EncodingVersion string `json:"encodingVersion,omitempty" protobuf:"bytes,2,opt,name=encodingVersion"` + + // The API server can decode objects encoded in these versions. + // The encodingVersion must be included in the decodableVersions. + // +listType=set + DecodableVersions []string `json:"decodableVersions,omitempty" protobuf:"bytes,3,opt,name=decodableVersions"` +} + +type StorageVersionConditionType string + +const ( + // Indicates that encoding storage versions reported by all servers are equal. 
+ AllEncodingVersionsEqual StorageVersionConditionType = "AllEncodingVersionsEqual" +) + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// Describes the state of the storageVersion at a certain point. +type StorageVersionCondition struct { + // Type of the condition. + // +required + Type StorageVersionConditionType `json:"type" protobuf:"bytes,1,opt,name=type"` + // Status of the condition, one of True, False, Unknown. + // +required + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` + // If set, this represents the .metadata.generation that the condition was set based upon. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + // Last time the condition transitioned from one status to another. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +required + Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` + // A human readable message indicating details about the transition. + // +required + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of StorageVersions. +type StorageVersionList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..b05c28595 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
+var map_ServerStorageVersion = map[string]string{ + "": "An API server instance reports the version it can decode and the version it encodes objects to when persisting objects in the backend.", + "apiServerID": "The ID of the reporting API server.", + "encodingVersion": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", + "decodableVersions": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", +} + +func (ServerStorageVersion) SwaggerDoc() map[string]string { + return map_ServerStorageVersion +} + +var map_StorageVersion = map[string]string{ + "": "\n Storage version of a specific resource.", + "metadata": "The name is ..", + "spec": "Spec is an empty spec. It is here to comply with Kubernetes API style.", + "status": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend.", +} + +func (StorageVersion) SwaggerDoc() map[string]string { + return map_StorageVersion +} + +var map_StorageVersionCondition = map[string]string{ + "": "Describes the state of the storageVersion at a certain point.", + "type": "Type of the condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "observedGeneration": "If set, this represents the .metadata.generation that the condition was set based upon.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StorageVersionCondition) SwaggerDoc() map[string]string { + return map_StorageVersionCondition +} + +var map_StorageVersionList = map[string]string{ + "": "A list of StorageVersions.", +} + +func (StorageVersionList) SwaggerDoc() map[string]string { + return map_StorageVersionList +} + +var map_StorageVersionSpec = map[string]string{ + "": "StorageVersionSpec is an empty spec.", +} + +func (StorageVersionSpec) SwaggerDoc() map[string]string { + return map_StorageVersionSpec +} + +var map_StorageVersionStatus = map[string]string{ + "": "API server instances report the versions they can decode and the version they encode objects to when persisting objects in the backend.", + "storageVersions": "The reported versions per API server instance.", + "commonEncodingVersion": "If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality.", + "conditions": "The latest available observations of the storageVersion's state.", +} + +func (StorageVersionStatus) SwaggerDoc() map[string]string { + return map_StorageVersionStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..7e82a9070 --- /dev/null +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,175 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) { + *out = *in + if in.DecodableVersions != nil { + in, out := &in.DecodableVersions, &out.DecodableVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStorageVersion. +func (in *ServerStorageVersion) DeepCopy() *ServerStorageVersion { + if in == nil { + return nil + } + out := new(ServerStorageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersion) DeepCopyInto(out *StorageVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersion. +func (in *StorageVersion) DeepCopy() *StorageVersion { + if in == nil { + return nil + } + out := new(StorageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionCondition) DeepCopyInto(out *StorageVersionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionCondition. +func (in *StorageVersionCondition) DeepCopy() *StorageVersionCondition { + if in == nil { + return nil + } + out := new(StorageVersionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionList) DeepCopyInto(out *StorageVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionList. +func (in *StorageVersionList) DeepCopy() *StorageVersionList { + if in == nil { + return nil + } + out := new(StorageVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *StorageVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionSpec) DeepCopyInto(out *StorageVersionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionSpec. +func (in *StorageVersionSpec) DeepCopy() *StorageVersionSpec { + if in == nil { + return nil + } + out := new(StorageVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionStatus) DeepCopyInto(out *StorageVersionStatus) { + *out = *in + if in.StorageVersions != nil { + in, out := &in.StorageVersions, &out.StorageVersions + *out = make([]ServerStorageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CommonEncodingVersion != nil { + in, out := &in.CommonEncodingVersion, &out.CommonEncodingVersion + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StorageVersionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionStatus. +func (in *StorageVersionStatus) DeepCopy() *StorageVersionStatus { + if in == nil { + return nil + } + out := new(StorageVersionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 6ef25f50f..5b7bda500 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -3557,10 +3557,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3677,10 +3674,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3829,10 +3823,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4043,10 +4034,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4163,10 +4151,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4357,10 +4342,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - 
} - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4616,10 +4598,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4737,10 +4716,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4889,10 +4865,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5136,10 +5109,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5256,10 +5226,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5510,10 +5477,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5731,10 +5695,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5852,10 +5813,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6004,10 +5962,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6218,10 +6173,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6338,10 +6290,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6499,10 +6448,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6681,10 +6627,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6770,10 +6713,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6895,10 +6835,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6968,10 +6905,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7120,10 +7054,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7334,10 +7265,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7454,10 +7382,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7747,10 +7672,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8013,10 +7935,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8134,10 +8053,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 6c5527974..3ee640462 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1; diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index f81b55901..6bc56e382 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -2734,10 +2734,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2854,10 +2851,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3006,10 +3000,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3253,10 +3244,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3373,10 +3361,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3568,7 +3553,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3618,10 +3603,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3908,10 +3890,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4129,10 +4108,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4250,10 +4226,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4322,10 +4295,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4447,10 
+4417,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4520,10 +4487,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4672,10 +4636,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4744,10 +4705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4926,7 +4884,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -4975,10 +4933,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5127,10 +5082,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5341,10 +5293,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5461,10 +5410,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5754,10 +5700,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6021,10 +5964,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6142,10 +6082,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git 
a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 694f6570d..888f3e79e 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1beta1; diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 8a9f20052..09db97a8d 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -3878,10 +3878,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3998,10 +3995,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4150,10 +4144,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4364,10 +4355,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4484,10 +4472,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4678,10 +4663,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4937,10 +4919,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5058,10 +5037,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5210,10 +5186,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5457,10 +5430,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) 
< 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5577,10 +5547,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5831,10 +5798,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6052,10 +6016,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6173,10 +6134,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6325,10 +6283,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6539,10 +6494,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6659,10 +6611,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6820,10 +6769,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7002,10 +6948,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7091,10 +7034,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7216,10 +7156,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7289,10 +7226,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7441,10 +7375,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7513,10 +7444,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7695,7 +7623,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7744,10 +7672,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7896,10 +7821,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8110,10 +8032,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8230,10 +8149,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8523,10 +8439,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8789,10 +8702,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8910,10 +8820,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 17e43970f..1ea7e23a8 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.apps.v1beta2; diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 6524f8ca9..896e0d340 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -1264,10 +1264,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1349,10 +1346,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1501,10 +1495,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1642,10 +1633,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1760,10 +1748,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1912,10 +1897,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2029,10 +2011,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2199,10 +2178,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2460,7 +2436,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -2477,10 +2453,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index db7be173d..2fb124364 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. 
// This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authentication.v1; diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 6c391dbfa..3d8f76515 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -737,10 +737,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -889,10 +886,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1006,10 +1000,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1176,10 +1167,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1437,7 +1425,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1454,10 +1442,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index caf2a6a53..d3bff49eb 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authentication.v1beta1; diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index dbc0bdc71..66c7c06ae 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -1793,10 +1793,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1945,10 +1942,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2062,10 +2056,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2179,10 +2170,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2456,10 +2444,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2637,10 +2622,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2789,10 +2771,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2914,10 +2893,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3066,10 +3042,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3151,10 +3124,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3303,10 +3273,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3604,7 +3571,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3653,10 +3620,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3810,10 +3774,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3983,10 +3944,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index f68a04e49..931b0f499 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authorization.v1; diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 647c0c582..4331d3e5b 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -1793,10 +1793,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1945,10 +1942,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2062,10 +2056,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2179,10 +2170,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2456,10 +2444,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2637,10 +2622,7 @@ 
func (m *ResourceRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2789,10 +2771,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2914,10 +2893,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3066,10 +3042,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3151,10 +3124,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3303,10 +3273,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3604,7 +3571,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3653,10 +3620,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3810,10 +3774,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3983,10 +3944,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 3876a3eeb..a9e221608 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.authorization.v1beta1; diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 1e3d89076..a6ff299d7 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{1} + return fileDescriptor_2bb1f2101a7f10e2, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } 
func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{2} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{3} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{8} + return fileDescriptor_2bb1f2101a7f10e2, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{9} + return fileDescriptor_2bb1f2101a7f10e2, []int{11} } func (m *MetricStatus) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{10} + return fileDescriptor_2bb1f2101a7f10e2, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{11} + return fileDescriptor_2bb1f2101a7f10e2, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{12} + return fileDescriptor_2bb1f2101a7f10e2, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{13} + return fileDescriptor_2bb1f2101a7f10e2, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{14} + return fileDescriptor_2bb1f2101a7f10e2, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{15} + return fileDescriptor_2bb1f2101a7f10e2, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +554,7 @@ var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{16} + return fileDescriptor_2bb1f2101a7f10e2, []int{18} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +582,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{17} + return fileDescriptor_2bb1f2101a7f10e2, []int{19} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +610,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo 
func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{18} + return fileDescriptor_2bb1f2101a7f10e2, []int{20} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -580,6 +636,8 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") @@ -606,102 +664,206 @@ func init() { } var fileDescriptor_2bb1f2101a7f10e2 = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 
0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 
0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, + // 1605 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x70, 0xd3, 0xd6, + 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 0x9f, 0x0b, 0x0f, 0x4c, 0x78, 0x58, 
0x19, 0x3d, 0x86, + 0xc9, 0x7b, 0xaf, 0x48, 0x8d, 0x4b, 0x19, 0xba, 0x8c, 0xdc, 0x52, 0x98, 0xc6, 0x10, 0x6e, 0x02, + 0xa5, 0xbf, 0xc3, 0x8d, 0x7c, 0x71, 0x44, 0x2c, 0xc9, 0x23, 0xc9, 0x1e, 0xc2, 0x0c, 0x33, 0xed, + 0xa2, 0xfb, 0x6e, 0x68, 0xb7, 0xed, 0x4c, 0xb7, 0x5d, 0xb3, 0xee, 0x8e, 0x25, 0x0b, 0x66, 0xca, + 0xca, 0x53, 0xd4, 0x45, 0x17, 0x5d, 0x75, 0xcb, 0xaa, 0xa3, 0xab, 0x2b, 0x59, 0xb2, 0x2d, 0xc5, + 0x71, 0x42, 0xa6, 0xed, 0xb0, 0xb3, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x1c, 0x50, 0xb6, + 0x2f, 0xd9, 0x92, 0x66, 0xca, 0xdb, 0xad, 0x4d, 0x6a, 0x19, 0xd4, 0xa1, 0xb6, 0xdc, 0xa6, 0x46, + 0xcd, 0xb4, 0x64, 0x7e, 0x40, 0x9a, 0x9a, 0x4c, 0x5a, 0x8e, 0x69, 0xab, 0xa4, 0xa1, 0x19, 0x75, + 0xb9, 0xbd, 0x2c, 0xd7, 0xa9, 0x41, 0x2d, 0xe2, 0xd0, 0x9a, 0xd4, 0xb4, 0x4c, 0xc7, 0x44, 0xa7, + 0x7c, 0x52, 0x89, 0x34, 0x35, 0x29, 0x42, 0x2a, 0xb5, 0x97, 0x17, 0xce, 0xd7, 0x35, 0x67, 0xab, + 0xb5, 0x29, 0xa9, 0xa6, 0x2e, 0xd7, 0xcd, 0xba, 0x29, 0x33, 0x8e, 0xcd, 0xd6, 0x5d, 0xf6, 0xc5, + 0x3e, 0xd8, 0x2f, 0x1f, 0x69, 0x41, 0x8c, 0x08, 0x55, 0x4d, 0x8b, 0x0e, 0x90, 0xb6, 0x70, 0xa1, + 0x4b, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x23, 0x37, 0xb7, 0xeb, 0x8c, 0xc9, 0xa2, 0xb6, + 0xd9, 0xb2, 0x54, 0xba, 0x27, 0x2e, 0x5b, 0xd6, 0xa9, 0x43, 0x06, 0xc9, 0x92, 0x93, 0xb8, 0xac, + 0x96, 0xe1, 0x68, 0x7a, 0xbf, 0x98, 0x8b, 0xbb, 0x31, 0xd8, 0xea, 0x16, 0xd5, 0x49, 0x2f, 0x9f, + 0xf8, 0x5b, 0x16, 0xce, 0x54, 0x4c, 0xc3, 0x21, 0x1e, 0x07, 0xe6, 0x8f, 0xa8, 0x52, 0xc7, 0xd2, + 0xd4, 0x75, 0xf6, 0x1b, 0x55, 0x20, 0x6f, 0x10, 0x9d, 0x16, 0x33, 0x8b, 0x99, 0xa5, 0x29, 0x45, + 0x7e, 0xd2, 0x11, 0xc6, 0xdc, 0x8e, 0x90, 0xbf, 0x46, 0x74, 0xfa, 0xb2, 0x23, 0x08, 0xfd, 0x8a, + 0x93, 0x02, 0x18, 0x8f, 0x04, 0x33, 0x66, 0x74, 0x1b, 0x8a, 0x0e, 0xb1, 0xea, 0xd4, 0x59, 0x69, + 0x53, 0x8b, 0xd4, 0xe9, 0x4d, 0x47, 0x6b, 0x68, 0x0f, 0x88, 0xa3, 0x99, 0x46, 0x31, 0xbb, 0x98, + 0x59, 0x1a, 0x57, 0xfe, 0xed, 0x76, 0x84, 0xe2, 0x46, 0x02, 0x0d, 0x4e, 0xe4, 0x46, 0x6d, 0x40, + 0xb1, 0xb3, 0x5b, 0xa4, 0xd1, 0xa2, 0xc5, 0xdc, 0x62, 0x66, 0xa9, 0x50, 0x96, 0xa4, 0xae, 0x83, + 0x84, 0x5a, 0x91, 0x9a, 0xdb, 0x75, 0xe6, 0x31, 0x81, 0xc9, 0xa4, 0x1b, 0x2d, 0x62, 0x38, 0x9a, + 0xb3, 0xa3, 0x9c, 0x70, 0x3b, 0x02, 0xda, 0xe8, 0x43, 0xc3, 0x03, 0x24, 0x20, 0x19, 0xa6, 0xd4, + 0x40, 0x6f, 0xc5, 0x71, 0xa6, 0x9b, 0x79, 0xae, 0x9b, 0xa9, 0xae, 0x42, 0xbb, 0x34, 0xe2, 0x1f, + 0x29, 0x9a, 0x76, 0x88, 0xd3, 0xb2, 0x0f, 0x46, 0xd3, 0x9f, 0xc0, 0x29, 0xb5, 0x65, 0x59, 0xd4, + 0x48, 0x56, 0xf5, 0x19, 0xb7, 0x23, 0x9c, 0xaa, 0x24, 0x11, 0xe1, 0x64, 0x7e, 0xf4, 0x10, 0x8e, + 0xc5, 0x0f, 0xf7, 0xa3, 0xed, 0xd3, 0xfc, 0x81, 0xc7, 0x2a, 0xfd, 0x90, 0x78, 0x90, 0x9c, 0xb8, + 0xce, 0xf3, 0x43, 0xe8, 0xfc, 0x51, 0x06, 0x4e, 0x57, 0x2c, 0xd3, 0xb6, 0x6f, 0x51, 0xcb, 0xd6, + 0x4c, 0xe3, 0xfa, 0xe6, 0x3d, 0xaa, 0x3a, 0x98, 0xde, 0xa5, 0x16, 0x35, 0x54, 0x8a, 0x16, 0x21, + 0xbf, 0xad, 0x19, 0x35, 0xae, 0xf1, 0xe9, 0x40, 0xe3, 0x1f, 0x68, 0x46, 0x0d, 0xb3, 0x13, 0x8f, + 0x82, 0xd9, 0x24, 0x1b, 0xa7, 0x88, 0x28, 0xbc, 0x0c, 0x40, 0x9a, 0x1a, 0x17, 0xc0, 0x54, 0x31, + 0xa5, 0x20, 0x4e, 0x07, 0x2b, 0x6b, 0x57, 0xf9, 0x09, 0x8e, 0x50, 0x89, 0xdf, 0xe4, 0xe0, 0xf8, + 0x7b, 0xf7, 0x1d, 0x6a, 0x19, 0xa4, 0x11, 0x0b, 0xb6, 0x32, 0x80, 0xce, 0xbe, 0xaf, 0x75, 0x1d, + 0x21, 0x04, 0xab, 0x86, 0x27, 0x38, 0x42, 0x85, 0x4c, 0x98, 0xf1, 0xbf, 0xd6, 0x69, 0x83, 0xaa, + 0x8e, 0x69, 0xb1, 0xcb, 0x16, 0xca, 0x6f, 0xa5, 0xd9, 0xc3, 0x96, 0xbc, 0xd4, 0x23, 0xb5, 0x97, + 0xa5, 0x55, 0xb2, 0x49, 0x1b, 0x01, 0xab, 0x82, 0xdc, 0x8e, 0x30, 0x53, 0x8d, 0xc1, 0xe1, 0x1e, + 0x78, 
0x44, 0xa0, 0xe0, 0x07, 0xc4, 0x7e, 0xac, 0x3f, 0xeb, 0x76, 0x84, 0xc2, 0x46, 0x17, 0x06, + 0x47, 0x31, 0x13, 0xa2, 0x3a, 0xff, 0xaa, 0xa3, 0x5a, 0xfc, 0xae, 0xdf, 0x30, 0x7e, 0x6c, 0xfe, + 0x2d, 0x0c, 0xb3, 0x05, 0xd3, 0x3c, 0x6c, 0xf6, 0x63, 0x99, 0xe3, 0xfc, 0x59, 0xd3, 0x95, 0x08, + 0x16, 0x8e, 0x21, 0xa3, 0x9d, 0xc1, 0x89, 0x60, 0x34, 0x03, 0x9d, 0xdc, 0x4b, 0x12, 0x10, 0x1f, + 0x67, 0xe1, 0xe4, 0x15, 0xd3, 0xd2, 0x1e, 0x78, 0x51, 0xde, 0x58, 0x33, 0x6b, 0x2b, 0xbc, 0xf2, + 0x53, 0x0b, 0xdd, 0x81, 0x49, 0x4f, 0x7b, 0x35, 0xe2, 0x10, 0x66, 0xa3, 0x42, 0xf9, 0xcd, 0xe1, + 0x74, 0xed, 0x27, 0x86, 0x2a, 0x75, 0x48, 0xd7, 0xaa, 0xdd, 0xff, 0x70, 0x88, 0x8a, 0x6e, 0x43, + 0xde, 0x6e, 0x52, 0x95, 0x5b, 0xf2, 0xa2, 0x94, 0xd8, 0x81, 0x48, 0x09, 0x77, 0x5c, 0x6f, 0x52, + 0xb5, 0x9b, 0x47, 0xbc, 0x2f, 0xcc, 0x10, 0xd1, 0x1d, 0x98, 0xb0, 0x99, 0xaf, 0x71, 0xb3, 0x5d, + 0x1a, 0x01, 0x9b, 0xf1, 0x2b, 0x33, 0x1c, 0x7d, 0xc2, 0xff, 0xc6, 0x1c, 0x57, 0xfc, 0x2a, 0x07, + 0x8b, 0x09, 0x9c, 0x15, 0xd3, 0xa8, 0x69, 0x2c, 0xc5, 0x5f, 0x81, 0xbc, 0xb3, 0xd3, 0x0c, 0x5c, + 0xfc, 0x42, 0x70, 0xd1, 0x8d, 0x9d, 0xa6, 0x57, 0x84, 0xce, 0xee, 0xc6, 0xef, 0xd1, 0x61, 0x86, + 0x80, 0x56, 0xc3, 0x07, 0x65, 0x63, 0x58, 0xfc, 0x5a, 0x2f, 0x3b, 0xc2, 0x80, 0xae, 0x4b, 0x0a, + 0x91, 0xe2, 0x97, 0xf7, 0x32, 0x42, 0x83, 0xd8, 0xce, 0x86, 0x45, 0x0c, 0xdb, 0x97, 0xa4, 0xe9, + 0x81, 0x87, 0xff, 0x6f, 0x38, 0x23, 0x7b, 0x1c, 0xca, 0x02, 0xbf, 0x05, 0x5a, 0xed, 0x43, 0xc3, + 0x03, 0x24, 0xa0, 0x73, 0x30, 0x61, 0x51, 0x62, 0x9b, 0x06, 0x2f, 0x38, 0xa1, 0x72, 0x31, 0xfb, + 0x17, 0xf3, 0x53, 0xf4, 0x5f, 0x38, 0xa2, 0x53, 0xdb, 0x26, 0x75, 0xca, 0xbb, 0x81, 0x59, 0x4e, + 0x78, 0xa4, 0xea, 0xff, 0x8d, 0x83, 0x73, 0xf1, 0x59, 0x06, 0x4e, 0x27, 0xe8, 0x71, 0x55, 0xb3, + 0x1d, 0xf4, 0x69, 0x9f, 0x17, 0x4b, 0x43, 0x66, 0x0c, 0xcd, 0xf6, 0x7d, 0x78, 0x8e, 0xcb, 0x9e, + 0x0c, 0xfe, 0x89, 0x78, 0xf0, 0x87, 0x30, 0xae, 0x39, 0x54, 0xf7, 0xac, 0x92, 0x5b, 0x2a, 0x94, + 0xcb, 0x7b, 0x77, 0x33, 0xe5, 0x28, 0x87, 0x1f, 0xbf, 0xea, 0x01, 0x61, 0x1f, 0x4f, 0xfc, 0x3d, + 0x9b, 0xf8, 0x2c, 0xcf, 0xcd, 0x51, 0x1b, 0x66, 0xd8, 0x97, 0x9f, 0x8a, 0x31, 0xbd, 0xcb, 0x1f, + 0x97, 0x16, 0x44, 0x29, 0xc5, 0x5b, 0x39, 0xc1, 0x6f, 0x31, 0xb3, 0x1e, 0x43, 0xc5, 0x3d, 0x52, + 0xd0, 0x32, 0x14, 0x74, 0xcd, 0xc0, 0xb4, 0xd9, 0xd0, 0x54, 0x62, 0xf3, 0x1e, 0x88, 0x95, 0x9f, + 0x6a, 0xf7, 0x6f, 0x1c, 0xa5, 0x41, 0x6f, 0x43, 0x41, 0x27, 0xf7, 0x43, 0x96, 0x1c, 0x63, 0x39, + 0xc6, 0xe5, 0x15, 0xaa, 0xdd, 0x23, 0x1c, 0xa5, 0x43, 0xf7, 0xa0, 0xe4, 0xd7, 0x94, 0xca, 0xda, + 0xcd, 0x48, 0xdb, 0xb4, 0x46, 0x2d, 0x95, 0x1a, 0x8e, 0xe7, 0x1a, 0x79, 0x86, 0x24, 0xba, 0x1d, + 0xa1, 0xb4, 0x91, 0x4a, 0x89, 0x77, 0x41, 0x12, 0x7f, 0xca, 0xc1, 0x99, 0xd4, 0x34, 0x80, 0x2e, + 0x03, 0x32, 0x37, 0x6d, 0x6a, 0xb5, 0x69, 0xed, 0x7d, 0xbf, 0xeb, 0xf7, 0x1a, 0x14, 0x4f, 0xe7, + 0x39, 0xbf, 0x26, 0x5e, 0xef, 0x3b, 0xc5, 0x03, 0x38, 0x90, 0x0a, 0x47, 0xbd, 0xb8, 0xf0, 0xb5, + 0xac, 0xf1, 0x5e, 0x68, 0x6f, 0x41, 0x37, 0xef, 0x76, 0x84, 0xa3, 0xab, 0x51, 0x10, 0x1c, 0xc7, + 0x44, 0x2b, 0x30, 0xcb, 0x93, 0x7d, 0x8f, 0xd6, 0x4f, 0x72, 0xad, 0xcf, 0x56, 0xe2, 0xc7, 0xb8, + 0x97, 0xde, 0x83, 0xa8, 0x51, 0x5b, 0xb3, 0x68, 0x2d, 0x84, 0xc8, 0xc7, 0x21, 0xde, 0x8d, 0x1f, + 0xe3, 0x5e, 0x7a, 0xa4, 0x83, 0xc0, 0x51, 0x13, 0x2d, 0x38, 0xce, 0x20, 0xff, 0xe3, 0x76, 0x04, + 0xa1, 0x92, 0x4e, 0x8a, 0x77, 0xc3, 0x12, 0x1f, 0xe5, 0x81, 0xf7, 0x0e, 0x2c, 0x40, 0x2e, 0xc4, + 0x52, 0xef, 0x62, 0x4f, 0xea, 0x9d, 0x8b, 0x36, 0x8a, 0x91, 0x34, 0x7b, 0x03, 0x26, 0x4c, 0x16, + 0x19, 0xdc, 0x2e, 0xe7, 0x53, 
0xc2, 0x29, 0x2c, 0x69, 0x21, 0x90, 0x02, 0x5e, 0x2e, 0xe3, 0xa1, + 0xc5, 0x81, 0xd0, 0x55, 0xc8, 0x37, 0xcd, 0x5a, 0x50, 0x88, 0xfe, 0x9f, 0x02, 0xb8, 0x66, 0xd6, + 0xec, 0x18, 0xdc, 0xa4, 0x77, 0x63, 0xef, 0x5f, 0xcc, 0x20, 0xd0, 0x47, 0x30, 0x19, 0x14, 0x7c, + 0xde, 0x1d, 0xc8, 0x29, 0x70, 0x83, 0x06, 0x50, 0x65, 0xda, 0x4b, 0x64, 0xc1, 0x09, 0x0e, 0xe1, + 0xd0, 0x43, 0x98, 0x57, 0x7b, 0xe7, 0xa9, 0xe2, 0x91, 0x5d, 0x6b, 0x67, 0xea, 0xb4, 0xab, 0xfc, + 0xcb, 0xed, 0x08, 0xf3, 0x7d, 0x24, 0xb8, 0x5f, 0x92, 0xf7, 0x32, 0xca, 0x3b, 0x45, 0xe6, 0x14, + 0xe9, 0x2f, 0x1b, 0xd4, 0xed, 0xfb, 0x2f, 0x0b, 0x4e, 0x70, 0x08, 0x27, 0x7e, 0x9b, 0x87, 0xe9, + 0x58, 0xf7, 0x79, 0xc8, 0x9e, 0xe1, 0xb7, 0x11, 0x07, 0xe6, 0x19, 0x3e, 0xdc, 0x81, 0x7a, 0x86, + 0x0f, 0x79, 0x48, 0x9e, 0xe1, 0x0b, 0x3b, 0x24, 0xcf, 0x88, 0xbc, 0x6c, 0x80, 0x67, 0x3c, 0xcb, + 0x01, 0xea, 0x0f, 0x62, 0xf4, 0x39, 0x4c, 0xf8, 0xe5, 0x62, 0x9f, 0x25, 0x35, 0x6c, 0x6e, 0x78, + 0xf5, 0xe4, 0xa8, 0x3d, 0xd3, 0x4f, 0x76, 0xa8, 0xe9, 0x87, 0x1e, 0xc4, 0x94, 0x18, 0xd6, 0xdc, + 0xc4, 0x49, 0xf1, 0x33, 0x98, 0xb4, 0x83, 0xf1, 0x2a, 0x3f, 0xfa, 0x78, 0xc5, 0x14, 0x1e, 0x0e, + 0x56, 0x21, 0x24, 0xaa, 0xc1, 0x34, 0x89, 0x4e, 0x38, 0xe3, 0x23, 0x3d, 0x63, 0xce, 0x1b, 0xa7, + 0x62, 0xa3, 0x4d, 0x0c, 0x55, 0xfc, 0xb9, 0xd7, 0xac, 0x7e, 0xd8, 0xff, 0x15, 0xcd, 0x7a, 0x78, + 0x33, 0xe6, 0x3f, 0xc2, 0xb2, 0xdf, 0x67, 0x61, 0xae, 0xb7, 0x48, 0x8e, 0xb4, 0x4c, 0x78, 0x30, + 0x70, 0x23, 0x92, 0x1d, 0xe9, 0xd2, 0xe1, 0x0c, 0x34, 0xe4, 0xae, 0x33, 0x6a, 0x89, 0xdc, 0x81, + 0x5b, 0x42, 0xfc, 0x21, 0xae, 0xa3, 0xd1, 0x17, 0x2e, 0x09, 0xeb, 0xc9, 0xec, 0x21, 0xad, 0x27, + 0x5f, 0xb1, 0x9a, 0x7e, 0xcc, 0xc2, 0xf1, 0xd7, 0x1b, 0xfa, 0xe1, 0x77, 0x79, 0x8f, 0xfb, 0xf5, + 0xf5, 0x7a, 0xcf, 0x3e, 0xd4, 0x8a, 0xed, 0xcb, 0x2c, 0x8c, 0xb3, 0xd1, 0xec, 0x10, 0x16, 0x6a, + 0x97, 0x63, 0x0b, 0xb5, 0xb3, 0x29, 0x15, 0x8e, 0xdd, 0x28, 0x71, 0x7d, 0x76, 0xad, 0x67, 0x7d, + 0x76, 0x6e, 0x57, 0xa4, 0xf4, 0x65, 0xd9, 0x3b, 0x30, 0x15, 0x0a, 0x44, 0x6f, 0x78, 0xbd, 0x2a, + 0x9f, 0x29, 0x33, 0xcc, 0xb6, 0xe1, 0x86, 0x25, 0x1c, 0x26, 0x43, 0x0a, 0x51, 0x83, 0x42, 0x44, + 0xc2, 0xde, 0x98, 0x3d, 0x6a, 0x3b, 0xba, 0x2e, 0x9e, 0xea, 0x52, 0xf7, 0xe7, 0x04, 0x65, 0xe9, + 0xc9, 0x8b, 0xd2, 0xd8, 0xd3, 0x17, 0xa5, 0xb1, 0xe7, 0x2f, 0x4a, 0x63, 0x5f, 0xb8, 0xa5, 0xcc, + 0x13, 0xb7, 0x94, 0x79, 0xea, 0x96, 0x32, 0xcf, 0xdd, 0x52, 0xe6, 0x17, 0xb7, 0x94, 0xf9, 0xfa, + 0xd7, 0xd2, 0xd8, 0xc7, 0xd9, 0xf6, 0xf2, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x83, 0x99, + 0xe1, 0x70, 0x1d, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, 
uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1138,6 +1300,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1214,6 +1388,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1723,6 +1909,44 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -1896,6 +2120,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if 
m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1923,6 +2151,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2083,6 +2315,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2200,6 +2458,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2214,6 +2473,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2320,20 +2580,357 @@ func (this *ScaleStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + 
return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2466,10 +3063,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2659,10 +3253,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2849,10 +3440,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3001,10 +3589,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3215,10 +3800,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3335,10 +3917,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3480,10 +4059,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3647,10 +4223,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3870,16 +4443,49 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4099,16 +4705,49 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4328,10 +4967,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4551,10 +5187,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4705,10 +5338,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4859,10 +5489,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5000,10 +5627,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5138,10 +5762,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5290,10 +5911,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if 
err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5362,10 +5980,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5466,10 +6081,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index f50ed9d1f..08b8667c8 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v1; @@ -30,6 +30,60 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in the requests and limits, describing a single container in +// each of the pods of the current scale target(e.g. CPU or memory). The values will be +// averaged together before being compared to the target. Such metrics are built into +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + + // container is the name of the container in the pods of the scaling target. + optional string container = 5; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. 
It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + + // container is the name of the container in the pods of the scaling taget + optional string container = 4; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -184,8 +238,10 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -207,6 +263,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod of the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -218,8 +283,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -241,6 +308,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). 
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 55b2a0d6b..3343177b2 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -165,6 +165,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -176,8 +182,10 @@ const ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -196,6 +204,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod of the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. 
It allows autoscaling based on information // coming from components running outside of cluster @@ -267,6 +283,30 @@ type ResourceMetricSource struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in the requests and limits, describing a single container in +// each of the pods of the current scale target(e.g. CPU or memory). The values will be +// averaged together before being compared to the target. Such metrics are built into +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. + Container string `json:"container" protobuf:"bytes,5,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -289,8 +329,10 @@ type ExternalMetricSource struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -309,6 +351,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. 
+ // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -414,6 +463,30 @@ type ResourceMetricStatus struct { CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 129ce2b48..192dc5f39 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -27,6 +27,30 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in the requests and limits, describing a single container in each of the pods of the current scale target(e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built into Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "container": "container is the name of the container in the pods of the scaling target.", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", + "container": "container is the name of the container in the pods of the scaling taget", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -122,12 +146,13 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -135,12 +160,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index ddb601128..05ae6ebda 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -25,6 +25,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -252,6 +300,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -288,6 +341,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index e129e41b8..28832c152 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{1} + return fileDescriptor_26c1bfc7a52d0478, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{2} + return fileDescriptor_26c1bfc7a52d0478, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m 
*HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{3} + return fileDescriptor_26c1bfc7a52d0478, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{8} + return fileDescriptor_26c1bfc7a52d0478, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{9} + return fileDescriptor_26c1bfc7a52d0478, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{10} + return 
fileDescriptor_26c1bfc7a52d0478, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{11} + return fileDescriptor_26c1bfc7a52d0478, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{12} + return fileDescriptor_26c1bfc7a52d0478, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{13} + return fileDescriptor_26c1bfc7a52d0478, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{14} + return fileDescriptor_26c1bfc7a52d0478, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{15} + return fileDescriptor_26c1bfc7a52d0478, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,6 +552,8 @@ func (m *ResourceMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus") @@ -519,100 +577,203 @@ func init() { } var fileDescriptor_26c1bfc7a52d0478 = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 
0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 
0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 
0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, + // 1562 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x6c, 0x1b, 0xc5, + 0x1b, 0x8f, 0xed, 0xcd, 0xeb, 0x73, 0x9a, 0xc7, 0xb4, 0xff, 0xd6, 0x4d, 0xff, 0xb5, 0xa3, 0x15, + 0x42, 0xa1, 0xa2, 0xbb, 0xad, 0x09, 0x0f, 0x09, 0x21, 0x11, 0x1b, 0x68, 0x2b, 0x92, 0xb6, 0x4c, + 0xd2, 0x0a, 0x41, 0x8b, 0x98, 0xac, 0xa7, 0xce, 0x12, 0x7b, 0xd7, 0xda, 0x19, 0x5b, 0x4d, 0x11, + 0x12, 0x42, 0xe2, 0xce, 0x05, 0xce, 0x20, 0x71, 0x45, 0x88, 0x0b, 0x9c, 0xb9, 0xf5, 0xd8, 0x63, + 0x2b, 0x90, 0x45, 0xcd, 0x81, 0x33, 0xd7, 0x9e, 0xd0, 0xcc, 0xce, 0xae, 0x77, 0xfd, 0x88, 0x1d, + 0x37, 0x0d, 0x0f, 0xf5, 0xe6, 0xdd, 0xf9, 0xbe, 0xdf, 0x37, 0xf3, 0xfb, 0x5e, 0xf3, 0xad, 0xe1, + 0xc2, 0xce, 0x2b, 0xcc, 0xb0, 0x5d, 0x73, 0xa7, 0xbe, 0x45, 0x3d, 0x87, 0x72, 0xca, 0xcc, 0x06, + 0x75, 0x4a, 0xae, 0x67, 0xaa, 0x05, 0x52, 0xb3, 0x4d, 0x52, 0xe7, 0x2e, 0xb3, 0x48, 0xc5, 0x76, + 0xca, 0x66, 0x23, 0xbf, 0x45, 0x39, 0x39, 0x6f, 0x96, 0xa9, 0x43, 0x3d, 0xc2, 0x69, 0xc9, 0xa8, + 0x79, 0x2e, 0x77, 0x51, 0xd6, 0x97, 0x37, 0x48, 0xcd, 0x36, 0x22, 0xf2, 0x86, 0x92, 0x5f, 0x3c, + 0x5b, 0xb6, 0xf9, 0x76, 0x7d, 0xcb, 0xb0, 0xdc, 0xaa, 0x59, 0x76, 0xcb, 0xae, 0x29, 0xd5, 0xb6, + 0xea, 0xb7, 0xe4, 0x93, 0x7c, 0x90, 0xbf, 0x7c, 0xb8, 0x45, 0x3d, 0x62, 0xde, 0x72, 0x3d, 0x6a, + 0x36, 0xba, 0x4c, 0x2e, 0xae, 0xb4, 0x65, 0xaa, 0xc4, 0xda, 0xb6, 0x1d, 0xea, 0xed, 0x9a, 0xb5, + 0x9d, 0xb2, 0x54, 0xf2, 0x28, 0x73, 0xeb, 0x9e, 0x45, 0xf7, 0xa5, 0xc5, 0xcc, 0x2a, 0xe5, 0xa4, + 0x97, 0x2d, 0xb3, 0x9f, 0x96, 0x57, 0x77, 0xb8, 0x5d, 0xed, 0x36, 0xf3, 0xd2, 0x20, 0x05, 0x66, + 0x6d, 0xd3, 0x2a, 0xe9, 0xd4, 0xd3, 0xff, 0x48, 0xc2, 0xe9, 0xa2, 0xeb, 0x70, 0x22, 0x34, 0xb0, + 0x3a, 0xc4, 0x3a, 0xe5, 0x9e, 0x6d, 0x6d, 0xc8, 0xdf, 0xa8, 0x08, 0x9a, 0x43, 0xaa, 0x34, 0x93, + 0x58, 0x4a, 0x2c, 0x4f, 0x17, 0xcc, 0xbb, 0xcd, 0xdc, 0x58, 0xab, 0x99, 0xd3, 0x2e, 0x93, 0x2a, + 0x7d, 0xd4, 0xcc, 0xe5, 0xba, 0x89, 0x33, 0x02, 0x18, 
0x21, 0x82, 0xa5, 0x32, 0x7a, 0x17, 0x32, + 0x9c, 0x78, 0x65, 0xca, 0x57, 0x1b, 0xd4, 0x23, 0x65, 0x7a, 0x8d, 0xdb, 0x15, 0xfb, 0x0e, 0xe1, + 0xb6, 0xeb, 0x64, 0x92, 0x4b, 0x89, 0xe5, 0xf1, 0xc2, 0xff, 0x5b, 0xcd, 0x5c, 0x66, 0xb3, 0x8f, + 0x0c, 0xee, 0xab, 0x8d, 0x1a, 0x80, 0x62, 0x6b, 0xd7, 0x49, 0xa5, 0x4e, 0x33, 0xa9, 0xa5, 0xc4, + 0x72, 0x3a, 0x6f, 0x18, 0xed, 0x28, 0x09, 0x59, 0x31, 0x6a, 0x3b, 0x65, 0x19, 0x36, 0x81, 0xcb, + 0x8c, 0x77, 0xea, 0xc4, 0xe1, 0x36, 0xdf, 0x2d, 0x1c, 0x6f, 0x35, 0x73, 0x68, 0xb3, 0x0b, 0x0d, + 0xf7, 0xb0, 0x80, 0x4c, 0x98, 0xb6, 0x02, 0xde, 0x32, 0x9a, 0xe4, 0x66, 0x41, 0x71, 0x33, 0xdd, + 0x26, 0xb4, 0x2d, 0xa3, 0xff, 0xb9, 0x07, 0xd3, 0x9c, 0xf0, 0x3a, 0x3b, 0x18, 0xa6, 0xdf, 0x87, + 0x93, 0x56, 0xdd, 0xf3, 0xa8, 0xd3, 0x9f, 0xea, 0xd3, 0xad, 0x66, 0xee, 0x64, 0xb1, 0x9f, 0x10, + 0xee, 0xaf, 0x8f, 0x3e, 0x81, 0xa3, 0xf1, 0xc5, 0xc7, 0x61, 0xfb, 0x94, 0x3a, 0xe0, 0xd1, 0x62, + 0x37, 0x24, 0xee, 0x65, 0x67, 0xff, 0x9c, 0x7f, 0x99, 0x80, 0x53, 0x45, 0xcf, 0x65, 0xec, 0x3a, + 0xf5, 0x98, 0xed, 0x3a, 0x57, 0xb6, 0x3e, 0xa2, 0x16, 0xc7, 0xf4, 0x16, 0xf5, 0xa8, 0x63, 0x51, + 0xb4, 0x04, 0xda, 0x8e, 0xed, 0x94, 0x14, 0xe3, 0x33, 0x01, 0xe3, 0x6f, 0xdb, 0x4e, 0x09, 0xcb, + 0x15, 0x21, 0x21, 0x7d, 0x92, 0x8c, 0x4b, 0x44, 0x08, 0xcf, 0x03, 0x90, 0x9a, 0xad, 0x0c, 0x48, + 0x2a, 0xa6, 0x0b, 0x48, 0xc9, 0xc1, 0xea, 0xd5, 0x4b, 0x6a, 0x05, 0x47, 0xa4, 0xf4, 0xaf, 0x52, + 0x70, 0xec, 0xcd, 0xdb, 0x9c, 0x7a, 0x0e, 0xa9, 0xc4, 0x92, 0x2d, 0x0f, 0x50, 0x95, 0xcf, 0x97, + 0xdb, 0x81, 0x10, 0x82, 0xad, 0x87, 0x2b, 0x38, 0x22, 0x85, 0x5c, 0x98, 0xf5, 0x9f, 0x36, 0x68, + 0x85, 0x5a, 0xdc, 0xf5, 0xe4, 0x66, 0xd3, 0xf9, 0x17, 0xf6, 0xf2, 0x07, 0x33, 0x44, 0xe9, 0x31, + 0x1a, 0xe7, 0x8d, 0x35, 0xb2, 0x45, 0x2b, 0x81, 0x6a, 0x01, 0xb5, 0x9a, 0xb9, 0xd9, 0xf5, 0x18, + 0x1c, 0xee, 0x80, 0x47, 0x04, 0xd2, 0x7e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0x5a, 0xcd, 0x5c, 0x7a, + 0xb3, 0x0d, 0x83, 0xa3, 0x98, 0x7d, 0xb2, 0x5a, 0x7b, 0xd2, 0x59, 0xad, 0x7f, 0xdd, 0xed, 0x18, + 0x3f, 0x37, 0xff, 0x15, 0x8e, 0xd9, 0x86, 0x19, 0x95, 0x36, 0x8f, 0xe3, 0x99, 0x63, 0xea, 0x58, + 0x33, 0xc5, 0x08, 0x16, 0x8e, 0x21, 0xa3, 0xdd, 0xde, 0x85, 0x60, 0x34, 0x07, 0x9d, 0xd8, 0x4f, + 0x11, 0xd0, 0x7f, 0x4e, 0xc2, 0x89, 0x8b, 0xae, 0x67, 0xdf, 0x11, 0x59, 0x5e, 0xb9, 0xea, 0x96, + 0x56, 0x55, 0xfb, 0xa7, 0x1e, 0xfa, 0x10, 0xa6, 0x04, 0x7b, 0x25, 0xc2, 0x89, 0xf4, 0x51, 0x3a, + 0x7f, 0x6e, 0x38, 0xae, 0xfd, 0xc2, 0xb0, 0x4e, 0x39, 0x69, 0x7b, 0xb5, 0xfd, 0x0e, 0x87, 0xa8, + 0xe8, 0x26, 0x68, 0xac, 0x46, 0x2d, 0xe5, 0xc9, 0x57, 0x8d, 0xbd, 0xaf, 0x21, 0x46, 0x9f, 0x8d, + 0x6e, 0xd4, 0xa8, 0xd5, 0x2e, 0x26, 0xe2, 0x09, 0x4b, 0x58, 0x44, 0x61, 0x82, 0xc9, 0x80, 0x53, + 0xbe, 0x7b, 0x6d, 0x54, 0x03, 0x12, 0xa4, 0x30, 0xab, 0x4c, 0x4c, 0xf8, 0xcf, 0x58, 0x81, 0xeb, + 0x9f, 0xa7, 0x60, 0xa9, 0x8f, 0x66, 0xd1, 0x75, 0x4a, 0xb6, 0x2c, 0xf6, 0x17, 0x41, 0xe3, 0xbb, + 0xb5, 0x20, 0xd8, 0x57, 0x82, 0xdd, 0x6e, 0xee, 0xd6, 0x44, 0x3b, 0x7a, 0x66, 0x90, 0xbe, 0x90, + 0xc3, 0x12, 0x01, 0xad, 0x85, 0xa7, 0x4a, 0xc6, 0xb0, 0xd4, 0xb6, 0x1e, 0x35, 0x73, 0x3d, 0xee, + 0x5f, 0x46, 0x88, 0x14, 0xdf, 0xbc, 0xa8, 0x0d, 0x15, 0xc2, 0xf8, 0xa6, 0x47, 0x1c, 0xe6, 0x5b, + 0xb2, 0xab, 0x41, 0xac, 0x9f, 0x19, 0xce, 0xdd, 0x42, 0xa3, 0xb0, 0xa8, 0x76, 0x81, 0xd6, 0xba, + 0xd0, 0x70, 0x0f, 0x0b, 0xe8, 0x59, 0x98, 0xf0, 0x28, 0x61, 0xae, 0xa3, 0x5a, 0x4f, 0x48, 0x2e, + 0x96, 0x6f, 0xb1, 0x5a, 0x45, 0xcf, 0xc1, 0x64, 0x95, 0x32, 0x46, 0xca, 0x34, 0x33, 0x2e, 0x05, + 0xe7, 0x94, 0xe0, 0xe4, 0xba, 0xff, 0x1a, 0x07, 0xeb, 0xfa, 0x83, 0x04, 0x9c, 
0xea, 0xc3, 0xe3, + 0x9a, 0xcd, 0x38, 0xba, 0xd1, 0x15, 0xcf, 0xc6, 0x90, 0xb5, 0xc3, 0x66, 0x7e, 0x34, 0xcf, 0x2b, + 0xdb, 0x53, 0xc1, 0x9b, 0x48, 0x2c, 0xdf, 0x80, 0x71, 0x9b, 0xd3, 0xaa, 0xf0, 0x4a, 0x6a, 0x39, + 0x9d, 0x7f, 0x79, 0xc4, 0x58, 0x2b, 0x1c, 0x51, 0x36, 0xc6, 0x2f, 0x09, 0x34, 0xec, 0x83, 0xea, + 0xbf, 0x24, 0xfb, 0x9e, 0x4d, 0x04, 0x3c, 0xfa, 0x18, 0x66, 0xe5, 0x93, 0x5f, 0x99, 0x31, 0xbd, + 0xa5, 0x4e, 0x38, 0x30, 0xa7, 0xf6, 0x68, 0xe8, 0x85, 0xe3, 0x6a, 0x2b, 0xb3, 0x1b, 0x31, 0x68, + 0xdc, 0x61, 0x0a, 0x9d, 0x87, 0x74, 0xd5, 0x76, 0x30, 0xad, 0x55, 0x6c, 0x8b, 0x30, 0x75, 0x2f, + 0x92, 0x2d, 0x69, 0xbd, 0xfd, 0x1a, 0x47, 0x65, 0xd0, 0x8b, 0x90, 0xae, 0x92, 0xdb, 0xa1, 0x4a, + 0x4a, 0xaa, 0x1c, 0x55, 0xf6, 0xd2, 0xeb, 0xed, 0x25, 0x1c, 0x95, 0x43, 0xd7, 0x44, 0x34, 0x88, + 0x2a, 0xcd, 0x32, 0x9a, 0xa4, 0xf9, 0xcc, 0xa0, 0xf3, 0xa9, 0x22, 0x2f, 0x4a, 0x44, 0x24, 0x72, + 0x24, 0x04, 0x0e, 0xb0, 0xf4, 0x1f, 0x35, 0x38, 0xbd, 0x67, 0xee, 0xa3, 0xb7, 0x00, 0xb9, 0x5b, + 0x8c, 0x7a, 0x0d, 0x5a, 0xba, 0xe0, 0x5f, 0xfa, 0xc5, 0xfd, 0x44, 0x70, 0x9c, 0xf2, 0x5b, 0xe2, + 0x95, 0xae, 0x55, 0xdc, 0x43, 0x03, 0x59, 0x70, 0x44, 0x24, 0x83, 0x4f, 0xa8, 0xad, 0xae, 0x42, + 0xfb, 0xcb, 0xb4, 0x85, 0x56, 0x33, 0x77, 0x64, 0x2d, 0x0a, 0x82, 0xe3, 0x98, 0x68, 0x15, 0xe6, + 0x54, 0xad, 0xef, 0x20, 0xf8, 0x84, 0x62, 0x60, 0xae, 0x18, 0x5f, 0xc6, 0x9d, 0xf2, 0x02, 0xa2, + 0x44, 0x99, 0xed, 0xd1, 0x52, 0x08, 0xa1, 0xc5, 0x21, 0xde, 0x88, 0x2f, 0xe3, 0x4e, 0x79, 0x54, + 0x81, 0x59, 0x85, 0xaa, 0xf8, 0xce, 0x8c, 0x4b, 0x97, 0x3d, 0x3f, 0xa4, 0xcb, 0xfc, 0xa2, 0x1b, + 0xc6, 0x60, 0x31, 0x86, 0x85, 0x3b, 0xb0, 0x11, 0x07, 0xb0, 0x82, 0x12, 0xc7, 0x32, 0x13, 0xd2, + 0xd2, 0xeb, 0x23, 0xe6, 0x60, 0x58, 0x2b, 0xdb, 0xed, 0x2b, 0x7c, 0xc5, 0x70, 0xc4, 0x8e, 0xfe, + 0xbd, 0x06, 0xd0, 0x8e, 0x30, 0xb4, 0x12, 0x2b, 0xf2, 0x4b, 0x1d, 0x45, 0x7e, 0x3e, 0x7a, 0x39, + 0x8d, 0x14, 0xf4, 0xeb, 0x30, 0xe1, 0xca, 0xcc, 0x53, 0xc1, 0x90, 0x1f, 0xb4, 0xed, 0xb0, 0x97, + 0x86, 0x68, 0x05, 0x10, 0xa5, 0x53, 0xe5, 0xaf, 0x42, 0x43, 0x97, 0x41, 0xab, 0xb9, 0xa5, 0xa0, + 0xf9, 0x9d, 0x1b, 0x84, 0x7a, 0xd5, 0x2d, 0xb1, 0x18, 0xe6, 0x94, 0xd8, 0xbb, 0x78, 0x8b, 0x25, + 0x0e, 0xfa, 0x00, 0xa6, 0x82, 0xeb, 0x86, 0xba, 0x9b, 0xac, 0x0c, 0xc2, 0xec, 0x35, 0x03, 0x17, + 0x66, 0x44, 0x05, 0x0d, 0x56, 0x70, 0x88, 0x89, 0x3e, 0x4b, 0xc0, 0x82, 0xd5, 0x39, 0xd3, 0x65, + 0x26, 0x87, 0x6b, 0xdd, 0x7b, 0x8e, 0xdd, 0x85, 0xff, 0xb5, 0x9a, 0xb9, 0x85, 0x2e, 0x11, 0xdc, + 0x6d, 0x4e, 0x1c, 0x92, 0xaa, 0x2b, 0xab, 0x6c, 0x38, 0x43, 0x1c, 0xb2, 0xd7, 0xec, 0xe1, 0x1f, + 0x32, 0x58, 0xc1, 0x21, 0xa6, 0xfe, 0x83, 0x06, 0x33, 0xb1, 0xbb, 0xf0, 0xdf, 0x11, 0x33, 0x7e, + 0x6a, 0x1d, 0x6c, 0xcc, 0xf8, 0x98, 0x07, 0x1f, 0x33, 0x3e, 0xee, 0xa1, 0xc6, 0x8c, 0x6f, 0xf2, + 0x30, 0x63, 0x26, 0x72, 0xc8, 0x1e, 0x31, 0xf3, 0x20, 0x05, 0xa8, 0x3b, 0xe7, 0x91, 0x05, 0x13, + 0xfe, 0xd0, 0x75, 0x10, 0xbd, 0x3e, 0xbc, 0x7f, 0xa9, 0xb6, 0xae, 0xa0, 0x3b, 0x46, 0xb5, 0xe4, + 0x50, 0xa3, 0x1a, 0x3d, 0x88, 0x91, 0x36, 0xbc, 0x0c, 0xf4, 0x1d, 0x6b, 0x6f, 0xc2, 0x14, 0x0b, + 0x66, 0x41, 0x6d, 0xf4, 0x59, 0x50, 0xb2, 0x1e, 0x4e, 0x81, 0x21, 0x24, 0x2a, 0xc1, 0x0c, 0x89, + 0x8e, 0x63, 0xe3, 0x23, 0x1d, 0x63, 0x5e, 0xcc, 0x7e, 0xb1, 0x39, 0x2c, 0x86, 0xaa, 0xff, 0xda, + 0xe9, 0x5b, 0xbf, 0x2a, 0xfc, 0x63, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0x7f, 0xc2, 0xbd, 0xdf, 0x24, + 0x61, 0xbe, 0xb3, 0xb1, 0x8e, 0xf4, 0xf9, 0xe3, 0x4e, 0xcf, 0x6f, 0x38, 0xc9, 0x91, 0x36, 0x1d, + 0xce, 0x6a, 0x43, 0x7e, 0x9d, 0x8d, 0x7a, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x8d, 0x73, 0x34, + 0xfa, 
0x27, 0xa2, 0x3e, 0x1f, 0x54, 0x93, 0x87, 0xf4, 0x41, 0xf5, 0x09, 0xd3, 0xf4, 0x5d, 0x12, + 0x8e, 0x3d, 0xfd, 0x4f, 0x61, 0xf8, 0xaf, 0x8f, 0x3f, 0x75, 0xf3, 0xf5, 0xf4, 0x9f, 0x81, 0x61, + 0x02, 0xb9, 0x70, 0xf6, 0xee, 0xc3, 0xec, 0xd8, 0xbd, 0x87, 0xd9, 0xb1, 0xfb, 0x0f, 0xb3, 0x63, + 0x9f, 0xb6, 0xb2, 0x89, 0xbb, 0xad, 0x6c, 0xe2, 0x5e, 0x2b, 0x9b, 0xb8, 0xdf, 0xca, 0x26, 0x7e, + 0x6b, 0x65, 0x13, 0x5f, 0xfc, 0x9e, 0x1d, 0x7b, 0x6f, 0x52, 0xb5, 0x9e, 0xbf, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x4d, 0x4c, 0xa8, 0x42, 0x87, 0x1c, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1081,6 +1242,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1157,6 +1330,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1556,6 +1741,44 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -1741,6 +1964,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1768,6 +1995,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1891,6 +2122,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2024,6 +2281,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), 
"ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2038,6 +2296,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2110,21 +2369,358 @@ func (this *ResourceMetricStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var 
v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2257,10 +2853,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2450,10 +3043,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2640,10 +3230,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2792,10 +3379,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3006,10 +3590,7 @@ func 
(m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3126,10 +3707,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3285,10 +3863,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3500,10 +4075,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3723,16 +4295,49 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3952,16 +4557,49 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4181,10 +4819,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4404,10 +5039,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4558,10 +5190,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4712,10 +5341,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4853,10 +5479,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4991,10 +5614,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index f90f93f9f..5ad55fa72 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v2beta1; @@ -30,6 +30,60 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2beta1"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. 
+ // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + + // container is the name of the container in the pods of the scaling target + optional string container = 4; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + + // container is the name of the container in the pods of the scaling target + optional string container = 4; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -200,8 +254,10 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -223,6 +279,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. 
+ // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -234,8 +299,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -257,6 +324,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index d76e87967..05023d9bc 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -76,6 +76,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -87,8 +93,10 @@ const ( // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. 
+ // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -107,6 +115,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -178,6 +194,30 @@ type ResourceMetricSource struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -265,8 +305,10 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. 
It will be one of "ContainerResource", + // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -285,6 +327,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -354,6 +403,30 @@ type ResourceMetricStatus struct { CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,4,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. 
type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index a0d5f5337..08a54621f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -27,6 +27,30 @@ package v2beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -123,12 +147,13 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -136,12 +161,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index c51e05b8f..f10c7884d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -25,6 +25,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -263,6 +311,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -299,6 +352,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index c69d6cb9e..cece3c877 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -47,10 +47,66 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) 
Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{1} + return fileDescriptor_592ad94d7d6be24f, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{2} + return fileDescriptor_592ad94d7d6be24f, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} + return fileDescriptor_592ad94d7d6be24f, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +330,7 @@ var 
xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} + return fileDescriptor_592ad94d7d6be24f, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} + return fileDescriptor_592ad94d7d6be24f, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{10} + return fileDescriptor_592ad94d7d6be24f, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} + return fileDescriptor_592ad94d7d6be24f, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} + return fileDescriptor_592ad94d7d6be24f, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} + return fileDescriptor_592ad94d7d6be24f, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} + return fileDescriptor_592ad94d7d6be24f, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} + return fileDescriptor_592ad94d7d6be24f, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -498,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} + return fileDescriptor_592ad94d7d6be24f, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} + return fileDescriptor_592ad94d7d6be24f, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} + return fileDescriptor_592ad94d7d6be24f, []int{20} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} + return fileDescriptor_592ad94d7d6be24f, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} + return fileDescriptor_592ad94d7d6be24f, []int{22} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} + return fileDescriptor_592ad94d7d6be24f, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,6 +720,8 @@ func (m *ResourceMetricStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricStatus") proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") @@ -693,111 +751,202 @@ func init() { } var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1657 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45, - 0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55, - 0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6, - 0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22, - 0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa, - 0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb, - 0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70, - 0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93, - 0xd8, 0x96, 0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed, - 0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea, - 0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17, - 0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72, - 0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f, - 0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5, - 0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1, - 0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60, - 0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4, - 0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38, - 0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37, - 0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22, - 0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a, - 0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd, - 0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81, - 0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59, - 0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a, - 0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6, - 0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45, - 0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b, - 0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30, - 0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76, - 0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7, - 0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54, - 0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31, - 0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 0xf2, - 0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3, - 0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6, - 0xdd, 0xca, 0xcd, 0x96, 0xf6, 
0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31, - 0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d, - 0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24, - 0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02, - 0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1, - 0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06, - 0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59, - 0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55, - 0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04, - 0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17, - 0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8, - 0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22, - 0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65, - 0xe7, 0xae, 0xad, 0xcc, 0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07, - 0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a, - 0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11, - 0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0, - 0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40, - 0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23, - 0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f, - 0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46, - 0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54, - 0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86, - 0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92, - 0xa0, 0xfa, 0x83, 0xe4, 0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b, - 0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11, - 0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa, - 0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c, - 0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2, - 0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45, - 0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d, - 0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0, - 0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c, - 0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02, - 0x9a, 0x60, 0x92, 0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d, - 0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c, - 0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 
0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54, - 0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58, - 0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a, - 0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b, - 0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4, - 0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58, - 0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12, - 0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4, - 0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 0x9f, 0x31, 0x83, 0x31, 0x29, 0x40, - 0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b, - 0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e, - 0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2, - 0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd, - 0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86, - 0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42, - 0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc, - 0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8, - 0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b, - 0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a, - 0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd, - 0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51, - 0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5, - 0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3, - 0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f, - 0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc, - 0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84, - 0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e, - 0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1, - 0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43, - 0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf, - 0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00, + // 1741 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x92, 0xd4, 0xd7, 0x50, 0x9f, 0xe3, 0x2f, 0x42, 0x86, 0x49, 0x61, 0x6b, 0xb4, 0xae, + 0x51, 0x2f, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0x5b, 0xdb, 0xb0, 0x64, 0xab, 0x43, + 0x59, 0x2d, 0x0a, 0xd9, 0xe8, 0x70, 0x77, 0x44, 0x4d, 0x45, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, + 0x81, 0xa2, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 
0x09, 0x90, 0x63, 0x3e, + 0x60, 0x18, 0x41, 0x10, 0xf8, 0x16, 0xe7, 0x42, 0xc4, 0xcc, 0x31, 0xc7, 0xdc, 0x7c, 0x0a, 0xe6, + 0x63, 0x3f, 0x49, 0x89, 0x94, 0x20, 0x29, 0xd0, 0x8d, 0x3b, 0xf3, 0xde, 0xef, 0xcd, 0x7b, 0xf3, + 0x7b, 0x6f, 0xde, 0x0c, 0xc1, 0xad, 0x9d, 0xeb, 0xae, 0x46, 0xed, 0xe2, 0x4e, 0xb3, 0x42, 0x1c, + 0x8b, 0x78, 0xc4, 0x2d, 0xb6, 0x88, 0x65, 0xda, 0x4e, 0x51, 0x4e, 0xe0, 0x06, 0x2d, 0xe2, 0xa6, + 0x67, 0xbb, 0x06, 0xae, 0x51, 0xab, 0x5a, 0x6c, 0x95, 0x2a, 0xc4, 0xc3, 0xa5, 0x62, 0x95, 0x58, + 0xc4, 0xc1, 0x1e, 0x31, 0xb5, 0x86, 0x63, 0x7b, 0x36, 0xcc, 0x0b, 0x79, 0x0d, 0x37, 0xa8, 0x16, + 0x91, 0xd7, 0xa4, 0xfc, 0xdc, 0xb5, 0x2a, 0xf5, 0xb6, 0x9b, 0x15, 0xcd, 0xb0, 0xeb, 0xc5, 0xaa, + 0x5d, 0xb5, 0x8b, 0x5c, 0xad, 0xd2, 0xdc, 0xe2, 0x5f, 0xfc, 0x83, 0xff, 0x12, 0x70, 0x73, 0x6a, + 0xc4, 0xbc, 0x61, 0x3b, 0xa4, 0xd8, 0x5a, 0x48, 0x9a, 0x9c, 0x5b, 0x0c, 0x65, 0xea, 0xd8, 0xd8, + 0xa6, 0x16, 0x71, 0x76, 0x8b, 0x8d, 0x9d, 0x2a, 0x57, 0x72, 0x88, 0x6b, 0x37, 0x1d, 0x83, 0x1c, + 0x48, 0xcb, 0x2d, 0xd6, 0x89, 0x87, 0x7b, 0xd9, 0x2a, 0xee, 0xa5, 0xe5, 0x34, 0x2d, 0x8f, 0xd6, + 0xbb, 0xcd, 0xfc, 0xa1, 0x9f, 0x82, 0x6b, 0x6c, 0x93, 0x3a, 0x4e, 0xea, 0xa9, 0x3f, 0x28, 0xe0, + 0xd2, 0xb2, 0x6d, 0x79, 0x98, 0x69, 0x20, 0xe9, 0xc4, 0x2a, 0xf1, 0x1c, 0x6a, 0x94, 0xf9, 0x6f, + 0xb8, 0x0c, 0x32, 0x16, 0xae, 0x93, 0x9c, 0x32, 0xaf, 0x5c, 0x19, 0xd7, 0x8b, 0x2f, 0xda, 0x85, + 0xa1, 0x4e, 0xbb, 0x90, 0xb9, 0x87, 0xeb, 0xe4, 0x4d, 0xbb, 0x50, 0xe8, 0x0e, 0x9c, 0xe6, 0xc3, + 0x30, 0x11, 0xc4, 0x95, 0xe1, 0x3a, 0x18, 0xf1, 0xb0, 0x53, 0x25, 0x5e, 0x2e, 0x35, 0xaf, 0x5c, + 0xc9, 0x96, 0x7e, 0xa3, 0xed, 0xbf, 0x7f, 0x9a, 0x58, 0xc2, 0x3a, 0xd7, 0xd1, 0xa7, 0xa4, 0xd1, + 0x11, 0xf1, 0x8d, 0x24, 0x16, 0x2c, 0x82, 0x71, 0xc3, 0x5f, 0x7b, 0x2e, 0xcd, 0xd7, 0x37, 0x2b, + 0x45, 0xc7, 0x43, 0xa7, 0x42, 0x19, 0xf5, 0xc7, 0x7d, 0xbc, 0xf5, 0xb0, 0xd7, 0x74, 0x8f, 0xc6, + 0xdb, 0x4d, 0x30, 0x6a, 0x34, 0x1d, 0x87, 0x58, 0xbe, 0xbb, 0x0b, 0x83, 0xb9, 0xbb, 0x81, 0x6b, + 0x4d, 0x22, 0x16, 0xa2, 0x4f, 0x4b, 0xd3, 0xa3, 0xcb, 0x02, 0x09, 0xf9, 0x90, 0x07, 0xf7, 0xfa, + 0x7d, 0x05, 0x5c, 0x5c, 0x76, 0x6c, 0xd7, 0xdd, 0x20, 0x8e, 0x4b, 0x6d, 0xeb, 0x7e, 0xe5, 0x3f, + 0xc4, 0xf0, 0x10, 0xd9, 0x22, 0x0e, 0xb1, 0x0c, 0x02, 0xe7, 0x41, 0x66, 0x87, 0x5a, 0xa6, 0xf4, + 0x79, 0xc2, 0xf7, 0xf9, 0x2e, 0xb5, 0x4c, 0xc4, 0x67, 0x98, 0x04, 0x8f, 0x4a, 0x2a, 0x2e, 0x11, + 0x71, 0xb9, 0x04, 0x00, 0x6e, 0x50, 0x69, 0x40, 0xae, 0x0a, 0x4a, 0x39, 0xb0, 0xb4, 0x76, 0x47, + 0xce, 0xa0, 0x88, 0x94, 0xfa, 0x5c, 0x01, 0x67, 0xff, 0xfa, 0xc4, 0x23, 0x8e, 0x85, 0x6b, 0x31, + 0xca, 0xfd, 0x13, 0x8c, 0xd4, 0xf9, 0x37, 0x5f, 0x52, 0xb6, 0xf4, 0xdb, 0xc1, 0xc2, 0x77, 0xc7, + 0x24, 0x96, 0x47, 0xb7, 0x28, 0x71, 0x42, 0xc6, 0x88, 0x19, 0x24, 0xf1, 0x8e, 0x87, 0x87, 0xea, + 0xd7, 0xdd, 0x8e, 0x08, 0x36, 0x1d, 0x9f, 0x23, 0xc7, 0x4a, 0x31, 0xf5, 0x43, 0x05, 0xcc, 0xdc, + 0x5e, 0x5b, 0x2a, 0x0b, 0x88, 0x35, 0xbb, 0x46, 0x8d, 0x5d, 0x78, 0x1d, 0x64, 0xbc, 0xdd, 0x86, + 0x9f, 0x1a, 0x97, 0x7d, 0x12, 0xac, 0xef, 0x36, 0x58, 0x6a, 0x9c, 0x4d, 0xca, 0xb3, 0x71, 0xc4, + 0x35, 0xe0, 0x2f, 0xc0, 0x70, 0x8b, 0xd9, 0xe5, 0x4b, 0x1d, 0xd6, 0x27, 0xa5, 0xea, 0x30, 0x5f, + 0x0c, 0x12, 0x73, 0xf0, 0x06, 0x98, 0x6c, 0x10, 0x87, 0xda, 0x66, 0x99, 0x18, 0xb6, 0x65, 0xba, + 0x9c, 0x44, 0xc3, 0xfa, 0x39, 0x29, 0x3c, 0xb9, 0x16, 0x9d, 0x44, 0x71, 0x59, 0xf5, 0x83, 0x14, + 0x98, 0x0e, 0x17, 0x80, 0x9a, 0x35, 0xe2, 0xc2, 0x47, 0x60, 0xce, 0xf5, 0x70, 0x85, 0xd6, 0xe8, + 0x53, 0xec, 0x51, 0xdb, 0xfa, 0x07, 0xb5, 0x4c, 0xfb, 0x71, 0x1c, 0x3d, 0xdf, 0x69, 0x17, 0xe6, + 
0xca, 0x7b, 0x4a, 0xa1, 0x7d, 0x10, 0xe0, 0x5d, 0x30, 0xe1, 0x92, 0x1a, 0x31, 0x3c, 0xe1, 0xaf, + 0x8c, 0xcb, 0xaf, 0x3a, 0xed, 0xc2, 0x44, 0x39, 0x32, 0xfe, 0xa6, 0x5d, 0x38, 0x13, 0x0b, 0x8c, + 0x98, 0x44, 0x31, 0x65, 0xf8, 0x08, 0x8c, 0x35, 0xd8, 0x2f, 0x4a, 0xdc, 0x5c, 0x6a, 0x3e, 0x3d, + 0x08, 0x57, 0x92, 0x01, 0xd7, 0x67, 0x64, 0xa8, 0xc6, 0xd6, 0x24, 0x12, 0x0a, 0x30, 0xd5, 0x4f, + 0x53, 0xe0, 0xc2, 0x6d, 0xdb, 0xa1, 0x4f, 0x59, 0x55, 0xa8, 0xad, 0xd9, 0xe6, 0x92, 0x44, 0x24, + 0x0e, 0xfc, 0x37, 0x18, 0x63, 0xe7, 0x90, 0x89, 0x3d, 0xdc, 0x83, 0xa7, 0xc1, 0x71, 0xa2, 0x35, + 0x76, 0xaa, 0x6c, 0xc0, 0xd5, 0x98, 0xb4, 0xd6, 0x5a, 0xd0, 0x44, 0x21, 0x59, 0x25, 0x1e, 0x0e, + 0x73, 0x3d, 0x1c, 0x43, 0x01, 0x2a, 0x7c, 0x08, 0x32, 0x6e, 0x83, 0x18, 0x92, 0xaa, 0x37, 0xfa, + 0x7a, 0xd6, 0x7b, 0xa1, 0xe5, 0x06, 0x31, 0xc2, 0xe2, 0xc3, 0xbe, 0x10, 0x87, 0x85, 0x04, 0x8c, + 0xb8, 0x9c, 0xd2, 0x7c, 0x57, 0xb3, 0xa5, 0x3f, 0x1d, 0xd6, 0x80, 0xc8, 0x8b, 0x20, 0xe7, 0xc4, + 0x37, 0x92, 0xe0, 0xea, 0x37, 0x0a, 0x28, 0xec, 0xa1, 0xa9, 0x93, 0x6d, 0xdc, 0xa2, 0xb6, 0x03, + 0x37, 0xc0, 0x28, 0x1f, 0x79, 0xd0, 0x90, 0xa1, 0x2c, 0x0e, 0xbe, 0x8d, 0x9c, 0xb6, 0x7a, 0x96, + 0x65, 0x64, 0x59, 0x60, 0x20, 0x1f, 0x0c, 0x6e, 0x82, 0x71, 0xfe, 0xf3, 0xa6, 0xfd, 0xd8, 0x92, + 0x61, 0x3c, 0x30, 0xf2, 0x24, 0x3b, 0x21, 0xca, 0x3e, 0x0a, 0x0a, 0x01, 0xd5, 0xb7, 0xd3, 0x60, + 0x7e, 0x0f, 0xcf, 0x96, 0x6d, 0xcb, 0xa4, 0x8c, 0xfc, 0xf0, 0x76, 0x2c, 0xff, 0x17, 0x13, 0xf9, + 0x7f, 0xb9, 0x9f, 0x7e, 0xa4, 0x1e, 0xac, 0x04, 0xfb, 0x95, 0x8a, 0x61, 0xc9, 0x80, 0xbf, 0x69, + 0x17, 0x7a, 0xf4, 0x63, 0x5a, 0x80, 0x14, 0xdf, 0x16, 0xd8, 0x02, 0xb0, 0x86, 0x5d, 0x6f, 0xdd, + 0xc1, 0x96, 0x2b, 0x2c, 0xd1, 0x3a, 0x91, 0x4c, 0xb8, 0x3a, 0x18, 0x91, 0x99, 0x86, 0x3e, 0x27, + 0x57, 0x01, 0x57, 0xba, 0xd0, 0x50, 0x0f, 0x0b, 0xf0, 0x97, 0x60, 0xc4, 0x21, 0xd8, 0xb5, 0xad, + 0x5c, 0x86, 0x7b, 0x11, 0xd0, 0x06, 0xf1, 0x51, 0x24, 0x67, 0xe1, 0xaf, 0xc1, 0x68, 0x9d, 0xb8, + 0x2e, 0xae, 0x92, 0xdc, 0x30, 0x17, 0x0c, 0xea, 0xee, 0xaa, 0x18, 0x46, 0xfe, 0xbc, 0xfa, 0xad, + 0x02, 0x2e, 0xee, 0x11, 0xc7, 0x15, 0xea, 0x7a, 0x70, 0xb3, 0x2b, 0x53, 0xb5, 0xc1, 0x1c, 0x64, + 0xda, 0x3c, 0x4f, 0x83, 0x1a, 0xe1, 0x8f, 0x44, 0xb2, 0x74, 0x13, 0x0c, 0x53, 0x8f, 0xd4, 0xfd, + 0x02, 0xf4, 0xc7, 0x43, 0x66, 0x51, 0x58, 0xdf, 0xef, 0x30, 0x34, 0x24, 0x40, 0xd5, 0xe7, 0xe9, + 0x3d, 0x7d, 0x63, 0xa9, 0x0c, 0xff, 0x0b, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0, + 0x6f, 0xb5, 0xd8, 0xa7, 0xb5, 0xd1, 0xcf, 0xcb, 0xa5, 0x4c, 0x95, 0x63, 0xd0, 0x28, 0x61, 0x0a, + 0x2e, 0x80, 0x6c, 0x9d, 0x5a, 0x88, 0x34, 0x6a, 0xd4, 0xc0, 0xae, 0x3c, 0xa7, 0xa6, 0x3b, 0xed, + 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x7b, 0x90, 0xad, 0xe3, 0x27, 0x81, 0x8a, 0x38, + 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x03, 0xc6, 0x06, 0x76, 0x12, + 0xbb, 0xb9, 0x0c, 0x0f, 0xf3, 0xd5, 0xc1, 0x0e, 0x6e, 0x5e, 0xfc, 0x22, 0xcc, 0xe1, 0x10, 0xc8, + 0xc7, 0x82, 0x14, 0x8c, 0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x7c, 0xd8, 0xed, 0x93, + 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xe3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01, + 0x85, 0x7f, 0x03, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x96, 0xb8, 0x6f, 0xb0, 0xa6, 0x90, + 0x6d, 0x67, 0x5a, 0x3f, 0xcf, 0x32, 0xec, 0x7e, 0xd7, 0x2c, 0xea, 0xa1, 0x01, 0x0d, 0x30, 0xc9, + 0xf2, 0x4e, 0xec, 0x1d, 0x95, 0xfd, 0xe7, 0xc1, 0x92, 0x7a, 0x96, 0xb5, 0x0e, 0x2b, 0x51, 0x10, + 0x14, 0xc7, 0x84, 0x4b, 0x60, 0x5a, 0xb6, 0x3d, 0x89, 0xbd, 0xbc, 0x20, 0x83, 0x3d, 0xbd, 0x1c, + 0x9f, 0x46, 0x49, 0x79, 
0x06, 0x61, 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0x9b, + 0xf1, 0x69, 0x94, 0x94, 0x87, 0x35, 0x30, 0x25, 0x51, 0xe5, 0xd6, 0xe6, 0x86, 0x39, 0x3b, 0x06, + 0x6c, 0x50, 0xe5, 0xc9, 0x15, 0xd0, 0x7d, 0x39, 0x86, 0x85, 0x12, 0xd8, 0xd0, 0x03, 0xc0, 0xf0, + 0xab, 0xa9, 0x9b, 0x1b, 0xe1, 0x96, 0xfe, 0x72, 0x48, 0xbe, 0x04, 0x65, 0x39, 0xec, 0x01, 0x82, + 0x21, 0x17, 0x45, 0xec, 0xa8, 0xef, 0x29, 0x60, 0x26, 0xd9, 0xe0, 0x06, 0x57, 0x0b, 0x65, 0xcf, + 0xab, 0xc5, 0x43, 0x30, 0x26, 0x5a, 0x25, 0xdb, 0x91, 0x04, 0xf8, 0xdd, 0x80, 0x45, 0x0f, 0x57, + 0x48, 0xad, 0x2c, 0x55, 0x05, 0x9d, 0xfd, 0x2f, 0x14, 0x40, 0xaa, 0x1f, 0x65, 0x00, 0x08, 0x53, + 0x0c, 0x2e, 0xc6, 0x4e, 0xb9, 0xf9, 0xc4, 0x29, 0x37, 0x13, 0xbd, 0xa7, 0x44, 0x4e, 0xb4, 0x0d, + 0x30, 0x62, 0xf3, 0xd2, 0x23, 0x57, 0x58, 0xea, 0x17, 0xcc, 0xa0, 0x4d, 0x0a, 0xd0, 0x74, 0xc0, + 0xce, 0x0e, 0x59, 0xc0, 0x24, 0x1a, 0xbc, 0x07, 0x32, 0x0d, 0xdb, 0xf4, 0xfb, 0x9a, 0xbe, 0x2d, + 0xe1, 0x9a, 0x6d, 0xba, 0x31, 0xcc, 0x31, 0xb6, 0x76, 0x36, 0x8a, 0x38, 0x0e, 0x6b, 0x33, 0xfd, + 0x97, 0x0a, 0x4e, 0xd1, 0x6c, 0x69, 0xb1, 0x1f, 0x66, 0xaf, 0x47, 0x01, 0x11, 0x4c, 0x7f, 0x06, + 0x05, 0x98, 0xf0, 0x2d, 0x05, 0xcc, 0x1a, 0xc9, 0x0b, 0x76, 0x6e, 0x74, 0xb0, 0xae, 0x6c, 0xdf, + 0x77, 0x08, 0xfd, 0x5c, 0xa7, 0x5d, 0x98, 0xed, 0x12, 0x41, 0xdd, 0xe6, 0x98, 0x93, 0x44, 0xde, + 0xc6, 0x64, 0x2d, 0xec, 0xeb, 0x64, 0xaf, 0x6b, 0xa8, 0x70, 0xd2, 0x9f, 0x41, 0x01, 0xa6, 0xfa, + 0x2c, 0x03, 0x26, 0x62, 0xd7, 0xbc, 0x9f, 0x83, 0x33, 0x22, 0xe1, 0x8f, 0x96, 0x33, 0x02, 0xf3, + 0xe8, 0x39, 0x23, 0x70, 0x4f, 0x94, 0x33, 0xc2, 0xe4, 0x49, 0x72, 0x26, 0xe2, 0x64, 0x0f, 0xce, + 0x7c, 0x9e, 0xf2, 0x39, 0x23, 0x9a, 0x8e, 0xc1, 0x38, 0x23, 0x64, 0x23, 0x9c, 0xb9, 0x1f, 0xbd, + 0x49, 0xf7, 0xe9, 0xfe, 0x34, 0x3f, 0xc2, 0xda, 0xdf, 0x9b, 0xd8, 0xf2, 0xa8, 0xb7, 0xab, 0x8f, + 0x77, 0xdd, 0xba, 0x4d, 0x30, 0x81, 0x5b, 0xc4, 0xc1, 0x55, 0xc2, 0x87, 0x25, 0x69, 0x0e, 0x8a, + 0x3b, 0xc3, 0x2e, 0xbd, 0x4b, 0x11, 0x1c, 0x14, 0x43, 0x65, 0x0d, 0x81, 0xfc, 0x7e, 0xe0, 0x05, + 0xb7, 0x69, 0x79, 0x46, 0xf2, 0x86, 0x60, 0xa9, 0x6b, 0x16, 0xf5, 0xd0, 0x50, 0xdf, 0x4d, 0x81, + 0xd9, 0xae, 0x77, 0x8c, 0x30, 0x28, 0xca, 0x31, 0x05, 0x25, 0x75, 0x82, 0x41, 0x49, 0x1f, 0x38, + 0x28, 0x5f, 0xa4, 0x00, 0xec, 0x3e, 0x4e, 0xe0, 0xff, 0x78, 0x53, 0x62, 0x38, 0xb4, 0x42, 0x4c, + 0x31, 0x7d, 0x14, 0x0d, 0x75, 0xb4, 0xa3, 0x89, 0x62, 0xa3, 0xa4, 0xb1, 0x63, 0x7a, 0xf2, 0x0d, + 0x5f, 0xd4, 0xd2, 0x47, 0xfb, 0xa2, 0xa6, 0x7e, 0x95, 0x0c, 0xe3, 0xa9, 0x7e, 0xc2, 0xeb, 0xb5, + 0xfd, 0xe9, 0x13, 0xdc, 0x7e, 0xf5, 0x33, 0x05, 0xcc, 0x24, 0xdb, 0x91, 0x53, 0xf7, 0xb0, 0xfb, + 0x65, 0xdc, 0x89, 0xd3, 0xfd, 0xa8, 0xfb, 0x4c, 0x01, 0x67, 0x4f, 0xd9, 0x3f, 0x3c, 0xea, 0x27, + 0xdd, 0x6b, 0x3e, 0x2d, 0xff, 0xd3, 0xe8, 0xd7, 0x5e, 0xbc, 0xce, 0x0f, 0xbd, 0x7c, 0x9d, 0x1f, + 0x7a, 0xf5, 0x3a, 0x3f, 0xf4, 0xff, 0x4e, 0x5e, 0x79, 0xd1, 0xc9, 0x2b, 0x2f, 0x3b, 0x79, 0xe5, + 0x55, 0x27, 0xaf, 0x7c, 0xd7, 0xc9, 0x2b, 0xef, 0x7c, 0x9f, 0x1f, 0xfa, 0xd7, 0xa8, 0x84, 0xfe, + 0x29, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x79, 0x7e, 0xc9, 0x1b, 0x1d, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { @@ -1408,6 +1557,18 @@ func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1484,6 +1645,18 @@ func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.External != nil { { size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) @@ -1928,6 +2101,36 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CrossVersionObjectReference) Size() (n int) { if m == nil { return 0 @@ -2166,6 +2369,10 @@ func (m *MetricSpec) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return 
n } @@ -2193,6 +2400,10 @@ func (m *MetricStatus) Size() (n int) { l = m.External.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2326,6 +2537,30 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} func (this *CrossVersionObjectReference) String() string { if this == nil { return "nil" @@ -2507,6 +2742,7 @@ func (this *MetricSpec) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, `}`, }, "") return s @@ -2521,6 +2757,7 @@ func (this *MetricStatus) String() string { `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2618,13 +2855,307 @@ func (this *ResourceMetricStatus) String() string { }, "") return s } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2757,10 +3288,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2876,10 +3404,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2995,10 +3520,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3118,10 +3640,7 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3258,10 +3777,7 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3410,10 +3926,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3535,10 +4048,7 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3749,10 +4259,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3869,10 +4376,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4064,10 +4568,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4279,10 +4780,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4400,10 +4898,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4623,16 +5118,49 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ 
-4852,16 +5380,49 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5035,10 +5596,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5180,10 +5738,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5332,10 +5887,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5484,10 +6036,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5603,10 +6152,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5722,10 +6268,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5840,10 +6383,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5958,10 +6498,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if 
(iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 24dc5882e..77a6cb379 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.autoscaling.v2beta2; @@ -30,6 +30,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2beta2"; +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // Container is the name of the container in the pods of the scaling target + optional string container = 3; +} + // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" @@ -256,8 +290,10 @@ message MetricIdentifier { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -279,6 +315,15 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. 
CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -290,8 +335,10 @@ message MetricSpec { // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -313,6 +360,14 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 6e5b8f68c..ac6cb6769 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -96,8 +96,10 @@ type CrossVersionObjectReference struct { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -116,6 +118,14 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). 
Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -169,7 +179,7 @@ type HPAScalingRules struct { // - For scale up: 0 (i.e. no stabilization is done). // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional @@ -220,6 +230,12 @@ const ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" // ExternalMetricSourceType is a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -263,6 +279,22 @@ type ResourceMetricSource struct { Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + // ExternalMetricSource indicates how to scale on a metric not associated with // any Kubernetes object (for example length of queue in cloud // messaging service, or QPS from loadbalancer running outside of cluster). @@ -382,8 +414,10 @@ type HorizontalPodAutoscalerCondition struct { // MetricStatus describes the last-read state of a single metric. type MetricStatus struct { - // type is the type of metric source. 
It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -402,6 +436,13 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -443,6 +484,20 @@ type ResourceMetricStatus struct { Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // Name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // Container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + // ExternalMetricStatus indicates the current value of a global metric // not associated with any Kubernetes object. type ExternalMetricStatus struct { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 3f38880f9..e3ea3002b 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -27,6 +27,28 @@ package v2beta2 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", + "container": "Container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", @@ -162,12 +184,13 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { } var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. 
Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -175,12 +198,13 @@ func (MetricSpec) SwaggerDoc() map[string]string { } var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. 
Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index ca26fe920..81642822a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -25,6 +25,40 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in @@ -340,6 +374,11 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { *out = new(ResourceMetricSource) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricSource) @@ -376,6 +415,11 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { *out = new(ResourceMetricStatus) (*in).DeepCopyInto(*out) } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } if in.External != nil { in, out := &in.External, &out.External *out = new(ExternalMetricStatus) diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 35944e726..dc43514c6 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -924,10 +924,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1171,10 +1168,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1291,10 +1285,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1534,10 +1525,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1750,10 +1738,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 75de45f17..7548c04dc 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v1; @@ -151,6 +151,7 @@ message JobSpec { // JobStatus represents the current state of a Job. message JobStatus { // The latest available observations of an object's current state. + // When a job fails, one of the conditions will have type == "Failed". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -166,6 +167,7 @@ message JobStatus { // Represents time when the job was completed. 
It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. + // The completion time is only set when the job finishes successfully. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 646a3cd7e..fd478874a 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -131,6 +131,7 @@ type JobSpec struct { // JobStatus represents the current state of a Job. type JobStatus struct { // The latest available observations of an object's current state. + // When a job fails, one of the conditions will have type == "Failed". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -146,6 +147,7 @@ type JobStatus struct { // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. + // The completion time is only set when the job finishes successfully. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 0120e07d4..0d8003a72 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -80,9 +80,9 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "conditions": "The latest available observations of an object's current state. When a job fails, one of the conditions will have type == \"Failed\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. 
The completion time is only set when the job finishes successfully.", "active": "The number of actively running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 69c4054bf..1d1f5f009 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -927,10 +927,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1047,10 +1044,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1278,10 +1272,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1401,10 +1392,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1520,10 +1508,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1639,10 +1624,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 995b4f3f9..4dab09a52 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
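As an aside to the autoscaling/v2beta2 hunks above, which add the ContainerResource metric source, the following is a minimal sketch of how a client could populate it. The import aliases, the container name "app", and the 60% CPU utilization target are assumptions made for the example; the source only has an effect on clusters running with the HPAContainerMetrics feature gate enabled, and none of this code is part of the patch itself.

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Target 60% average CPU utilization of the "app" container in each pod
	// of the scale target (values are illustrative only).
	avgUtilization := int32(60)
	spec := autoscalingv2beta2.MetricSpec{
		Type: autoscalingv2beta2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2beta2.ContainerResourceMetricSource{
			Name:      corev1.ResourceCPU,
			Container: "app", // hypothetical container name
			Target: autoscalingv2beta2.MetricTarget{
				Type:               autoscalingv2beta2.UtilizationMetricType,
				AverageUtilization: &avgUtilization,
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}

Such a MetricSpec would sit in the Metrics list of a HorizontalPodAutoscalerSpec; nothing else about the HPA object changes with this addition.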
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v1beta1; diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 3e58dbb92..16fb819cd 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -927,10 +927,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1047,10 +1044,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1278,10 +1272,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1401,10 +1392,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1520,10 +1508,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1639,10 +1624,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 0bba13b86..f538d50cd 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
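The batch/v1 JobStatus comment changes above state that a failed Job carries a condition with type "Failed" and that completionTime is set only on success. A small sketch of how a consumer might rely on those semantics follows; the helper name jobFinished is ours, and the snippet is illustrative rather than part of the patch.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobFinished reports whether the Job reached a terminal state and, if so,
// whether it succeeded, by scanning the Complete/Failed conditions.
func jobFinished(status batchv1.JobStatus) (finished, succeeded bool) {
	for _, c := range status.Conditions {
		if c.Status != corev1.ConditionTrue {
			continue
		}
		switch c.Type {
		case batchv1.JobComplete:
			return true, true
		case batchv1.JobFailed:
			return true, false
		}
	}
	return false, false
}

func main() {
	var status batchv1.JobStatus // in practice, read from the API server
	if done, ok := jobFinished(status); done && ok {
		// Per the documentation change above, CompletionTime is set only
		// when the job finishes successfully.
		fmt.Println("completed at:", status.CompletionTime)
	}
}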
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.batch.v2alpha1; diff --git a/vendor/k8s.io/api/certificates/v1/generated.pb.go b/vendor/k8s.io/api/certificates/v1/generated.pb.go index d2cf41a25..fca7d2115 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1/generated.pb.go @@ -989,10 +989,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1236,10 +1233,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1356,10 +1350,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1683,7 +1674,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1732,10 +1723,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1853,10 +1841,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1938,10 +1923,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 8427424a8..839c1aa87 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
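Throughout the generated MarshalToSizedBuffer functions earlier in this section, fields are written back to front and each is preceded by a hard-coded key byte: 0xa, 0x12, 0x1a, and 0x3a for the new field 7 (containerResource). Those values are simply (fieldNumber << 3) | wireType with wire type 2 (length-delimited); the short sketch below, using helper names of our own, reproduces that arithmetic.

package main

import "fmt"

// wireLengthDelimited is protobuf wire type 2, used for strings and
// embedded messages.
const wireLengthDelimited = 2

// keyByte computes the single-byte field key emitted by the generated
// marshalers for small field numbers.
func keyByte(fieldNumber int) byte {
	return byte(fieldNumber<<3 | wireLengthDelimited)
}

func main() {
	for _, f := range []int{1, 2, 3, 7} {
		fmt.Printf("field %d -> 0x%x\n", f, keyByte(f))
	}
	// Prints 0xa, 0x12, 0x1a and 0x3a, matching the constants written for
	// name, target/current, container and the new containerResource field.
}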
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.certificates.v1; diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 1729931b8..f21256f48 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -993,10 +993,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1240,10 +1237,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1360,10 +1354,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1687,7 +1678,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1737,10 +1728,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1858,10 +1846,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1943,10 +1928,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 2fb4dc4ec..73631fb77 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
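A recurring, purely mechanical change in the Unmarshal functions of this and the surrounding files folds the previous two-step guard (skippy < 0, then iNdEx+skippy < 0) into one condition before skipping an unknown field. A standalone sketch of that guard, with hypothetical names mirroring the generated code, might look like this.

package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// skipUnknown advances past an unknown field the way the generated code
// does: skippy is the encoded size reported by the skip helper, iNdEx the
// current offset, and l the buffer length.
func skipUnknown(iNdEx, skippy, l int) (int, error) {
	// Combined guard: a negative size or an offset that would wrap around
	// is rejected in a single branch.
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return 0, errInvalidLength
	}
	if iNdEx+skippy > l {
		return 0, io.ErrUnexpectedEOF
	}
	return iNdEx + skippy, nil
}

func main() {
	next, err := skipUnknown(10, 4, 32)
	fmt.Println(next, err) // 14 <nil>
}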
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.certificates.v1beta1; diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 22c3d624e..d5ed0f27c 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -554,10 +554,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -674,10 +671,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -872,10 +866,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 4206746d8..4d887850d 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.coordination.v1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 57a314cfd..bcd00d445 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -554,10 +554,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -674,10 +671,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -872,10 +866,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index cfc2711c6..10b485e30 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
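The hunks above (and the matching ones in the remaining k8s.io/api packages) all make the same two changes to the vendored, code-generated files: the proto file headers switch from single-quoted to double-quoted "proto2", and the generated Unmarshal functions collapse the two guards on the skip length for unknown fields into a single condition. Below is a minimal standalone sketch of that consolidated guard, reusing the skippy, iNdEx, and l names from the generated code; the boundsCheck helper and errInvalidLength value are illustrative stand-ins only, not symbols from the vendored files, which inline this check in every Unmarshal method.

package main

import (
	"errors"
	"fmt"
	"io"
	"math" // math.MaxInt needs Go 1.17+
)

// errInvalidLength stands in for the generated ErrInvalidLengthGenerated value.
var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// boundsCheck sketches the consolidated guard: a negative skip length and an
// integer-overflowing new index are both rejected in one condition, followed
// by the usual truncation check against the buffer length l.
func boundsCheck(iNdEx, skippy, l int) error {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return errInvalidLength
	}
	if (iNdEx + skippy) > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

func main() {
	fmt.Println(boundsCheck(10, -3, 100))           // negative skippy -> invalid length
	fmt.Println(boundsCheck(math.MaxInt-1, 2, 100)) // index overflow  -> invalid length
	fmt.Println(boundsCheck(10, 5, 100))            // within bounds   -> <nil>
}

Merging the checks does not change behavior for well-formed input; it only folds the negative-length and index-overflow rejections, which previously lived in separate blocks, into one branch before the truncation check.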
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.coordination.v1beta1; diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 9b29b21e5..54098aa70 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -4025,10 +4025,38 @@ func (m *PodTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo +func (m *PortStatus) Reset() { *m = PortStatus{} } +func (*PortStatus) ProtoMessage() {} +func (*PortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortStatus.Merge(m, src) +} +func (m *PortStatus) XXX_Size() int { + return m.Size() +} +func (m *PortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PortStatus proto.InternalMessageInfo + func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func 
(*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4224,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4252,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4280,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4308,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4336,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4364,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4392,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4420,7 @@ var 
xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4448,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4476,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4504,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4532,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4560,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4588,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4616,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, 
[]int{163} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4644,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4672,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4700,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4728,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4756,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4784,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4812,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4840,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return 
fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4868,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4896,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4924,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4952,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4980,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5008,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5036,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5064,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *Service) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -5064,7 +5092,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5120,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5148,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5176,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5204,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5232,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5260,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5288,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5316,7 @@ var xxx_messageInfo_ServiceStatus 
proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5344,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5372,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5400,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5428,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5456,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5484,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5512,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5540,7 @@ var 
xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5568,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5596,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5624,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5652,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5680,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5708,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5736,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5764,7 @@ var 
xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5792,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5820,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5848,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6006,6 +6034,7 @@ func init() { proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortStatus)(nil), "k8s.io.api.core.v1.PortStatus") proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") @@ -6087,876 +6116,882 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 13889 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0xd7, - 0x75, 0x18, 0xac, 0x9e, 0xc1, 0x63, 0xe6, 0xe0, 0x7d, 0xb1, 0xbb, 0xc4, 0x82, 0xbb, 0x8b, 0x65, - 0xaf, 0xb4, 0x5c, 0x8a, 0x24, 0x56, 0x7c, 0x89, 0x34, 0x49, 0xd1, 0x02, 0x30, 0xc0, 0xee, 0x70, - 0x17, 0xd8, 0xe1, 0x1d, 0xec, 0xae, 0x44, 0x53, 0xfa, 0xd4, 0x98, 0xb9, 0x00, 0x9a, 0x98, 0xe9, - 0x1e, 0x76, 0xf7, 0x60, 0x17, 0xfc, 0xe4, 0xfa, 0xfc, 0xc9, 0x4f, 0xf9, 0x91, 0x52, 0xa5, 0x5c, - 0x79, 0xd8, 0x2e, 0x57, 0xca, 0x71, 0xca, 0x56, 0x9c, 0xa4, 0xe2, 0xd8, 0xb1, 0x1d, 0xcb, 0x89, - 0x9d, 0x38, 0x0f, 0x27, 0x3f, 0x1c, 0xc7, 0x95, 0x44, 0xae, 0x72, 0x05, 0xb1, 0xd7, 0xa9, 0xb8, - 0xf4, 0x23, 0xb6, 0x13, 0x3b, 0x3f, 0x82, 0xb8, 0xe2, 0xd4, 0x7d, 0xf6, 0xbd, 0x3d, 0xdd, 0x33, - 0x83, 0x25, 0x00, 0x51, 0x2a, 0xfe, 0x9b, 0xb9, 0xe7, 0xdc, 0x73, 0x6f, 0xdf, 0xe7, 0xb9, 0xe7, - 0x09, 0xaf, 0xec, 0xbc, 0x14, 0xce, 
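In the core/v1 hunk the only substantive addition is the new PortStatus message: it takes descriptor index 142, so the Descriptor() index of every later message (PortworxVolumeSource, Preconditions, through WindowsSecurityContextOptions) shifts up by one, the type is added to the proto.RegisterType calls in init(), and the fileDescriptor_83c10c24ec417dc9 byte array (the gzipped FileDescriptorProto whose old bytes are being removed above and below) is regenerated. A small stdlib-only sketch of that renumbering follows; the insertMessage helper and the truncated message list are hypothetical, not part of the vendored file.

package main

import "fmt"

// insertMessage returns a copy of order with name inserted at position idx,
// mirroring how adding PortStatus at descriptor index 142 bumps
// PortworxVolumeSource from 142 to 143, Preconditions from 143 to 144, etc.
func insertMessage(order []string, idx int, name string) []string {
	out := make([]string, 0, len(order)+1)
	out = append(out, order[:idx]...)
	out = append(out, name)
	out = append(out, order[idx:]...)
	return out
}

func main() {
	// Truncated excerpt of the core/v1 message order, starting at index 142.
	order := []string{"PortworxVolumeSource", "Preconditions", "PreferAvoidPodsEntry"}
	const base = 142
	for i, name := range insertMessage(order, 0, "PortStatus") {
		fmt.Printf("%-22s Descriptor() index []int{%d}\n", name, base+i)
	}
}

This is why the hunk is dominated by one-line []int{N} -> []int{N+1} edits even though only a single message was added.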
0xbb, 0xfe, 0xd5, 0x9d, 0xf6, 0x06, 0x09, 0x3c, 0x12, 0x91, - 0xf0, 0xea, 0x2e, 0xf1, 0xea, 0x7e, 0x70, 0x55, 0x00, 0x9c, 0x96, 0x7b, 0xb5, 0xe6, 0x07, 0xe4, - 0xea, 0xee, 0x33, 0x57, 0xb7, 0x88, 0x47, 0x02, 0x27, 0x22, 0xf5, 0xf9, 0x56, 0xe0, 0x47, 0x3e, - 0x42, 0x1c, 0x67, 0xde, 0x69, 0xb9, 0xf3, 0x14, 0x67, 0x7e, 0xf7, 0x99, 0xd9, 0xa7, 0xb7, 0xdc, - 0x68, 0xbb, 0xbd, 0x31, 0x5f, 0xf3, 0x9b, 0x57, 0xb7, 0xfc, 0x2d, 0xff, 0x2a, 0x43, 0xdd, 0x68, - 0x6f, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0x3e, 0x1f, 0x37, 0xd3, 0x74, 0x6a, 0xdb, - 0xae, 0x47, 0x82, 0xbd, 0xab, 0xad, 0x9d, 0x2d, 0xd6, 0x6e, 0x40, 0x42, 0xbf, 0x1d, 0xd4, 0x48, - 0xb2, 0xe1, 0xae, 0xb5, 0xc2, 0xab, 0x4d, 0x12, 0x39, 0x29, 0xdd, 0x9d, 0xbd, 0x9a, 0x55, 0x2b, - 0x68, 0x7b, 0x91, 0xdb, 0xec, 0x6c, 0xe6, 0xe3, 0xbd, 0x2a, 0x84, 0xb5, 0x6d, 0xd2, 0x74, 0x3a, - 0xea, 0x3d, 0x97, 0x55, 0xaf, 0x1d, 0xb9, 0x8d, 0xab, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, - 0x7f, 0xd5, 0x82, 0x8b, 0x0b, 0x77, 0xab, 0xcb, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, - 0xed, 0x54, 0x23, 0x3f, 0x20, 0x77, 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0x53, 0x50, - 0xd8, 0x65, 0xff, 0xcb, 0xa5, 0x19, 0xeb, 0xa2, 0x75, 0xa5, 0xb8, 0x38, 0xf9, 0x1b, 0xfb, 0x73, - 0x1f, 0x7a, 0xb0, 0x3f, 0x57, 0xb8, 0x23, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x61, 0x68, 0x33, 0x5c, - 0xdf, 0x6b, 0x91, 0x99, 0x1c, 0xc3, 0x1d, 0x17, 0xb8, 0x43, 0x2b, 0x55, 0x5a, 0x8a, 0x05, 0x14, - 0x5d, 0x85, 0x62, 0xcb, 0x09, 0x22, 0x37, 0x72, 0x7d, 0x6f, 0x26, 0x7f, 0xd1, 0xba, 0x32, 0xb8, - 0x38, 0x25, 0x50, 0x8b, 0x15, 0x09, 0xc0, 0x31, 0x0e, 0xed, 0x46, 0x40, 0x9c, 0xfa, 0x2d, 0xaf, - 0xb1, 0x37, 0x33, 0x70, 0xd1, 0xba, 0x52, 0x88, 0xbb, 0x81, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8f, - 0xe4, 0xa0, 0xb0, 0xb0, 0xb9, 0xe9, 0x7a, 0x6e, 0xb4, 0x87, 0xee, 0xc0, 0xa8, 0xe7, 0xd7, 0x89, - 0xfc, 0xcf, 0xbe, 0x62, 0xe4, 0xd9, 0x8b, 0xf3, 0x9d, 0x4b, 0x69, 0x7e, 0x4d, 0xc3, 0x5b, 0x9c, - 0x7c, 0xb0, 0x3f, 0x37, 0xaa, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd2, 0xf2, 0xeb, 0x8a, 0x6c, - 0x8e, 0x91, 0x9d, 0x4b, 0x23, 0x5b, 0x89, 0xd1, 0x16, 0x27, 0x1e, 0xec, 0xcf, 0x8d, 0x68, 0x05, - 0x58, 0x27, 0x82, 0x36, 0x60, 0x82, 0xfe, 0xf5, 0x22, 0x57, 0xd1, 0xcd, 0x33, 0xba, 0x97, 0xb2, - 0xe8, 0x6a, 0xa8, 0x8b, 0xd3, 0x0f, 0xf6, 0xe7, 0x26, 0x12, 0x85, 0x38, 0x49, 0xd0, 0x7e, 0x17, - 0xc6, 0x17, 0xa2, 0xc8, 0xa9, 0x6d, 0x93, 0x3a, 0x9f, 0x41, 0xf4, 0x3c, 0x0c, 0x78, 0x4e, 0x93, - 0x88, 0xf9, 0xbd, 0x28, 0x06, 0x76, 0x60, 0xcd, 0x69, 0x92, 0x83, 0xfd, 0xb9, 0xc9, 0xdb, 0x9e, - 0xfb, 0x4e, 0x5b, 0xac, 0x0a, 0x5a, 0x86, 0x19, 0x36, 0x7a, 0x16, 0xa0, 0x4e, 0x76, 0xdd, 0x1a, - 0xa9, 0x38, 0xd1, 0xb6, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x49, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x87, - 0xe2, 0xc2, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x3d, 0x44, 0x3b, 0x30, 0xd1, 0x0a, 0xc8, 0x26, 0x09, - 0x54, 0xd1, 0x8c, 0x75, 0x31, 0x7f, 0x65, 0xe4, 0xd9, 0x2b, 0xa9, 0x1f, 0x6b, 0xa2, 0x2e, 0x7b, - 0x51, 0xb0, 0xb7, 0xf8, 0x88, 0x68, 0x6f, 0x22, 0x01, 0xc5, 0x49, 0xca, 0xf6, 0x3f, 0xcf, 0xc1, - 0xe9, 0x85, 0x77, 0xdb, 0x01, 0x29, 0xb9, 0xe1, 0x4e, 0x72, 0x85, 0xd7, 0xdd, 0x70, 0x67, 0x2d, - 0x1e, 0x01, 0xb5, 0xb4, 0x4a, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x34, 0x0c, 0xd3, 0xdf, 0xb7, 0x71, - 0x59, 0x7c, 0xf2, 0xb4, 0x40, 0x1e, 0x29, 0x39, 0x91, 0x53, 0xe2, 0x20, 0x2c, 0x71, 0xd0, 0x2a, - 0x8c, 0xd4, 0xd8, 0x86, 0xdc, 0x5a, 0xf5, 0xeb, 0x84, 0x4d, 0x66, 0x71, 0xf1, 0x49, 0x8a, 0xbe, - 0x14, 0x17, 0x1f, 0xec, 0xcf, 0xcd, 0xf0, 0xbe, 0x09, 0x12, 0x1a, 0x0c, 0xeb, 0xf5, 0x91, 0xad, - 0xf6, 0xd7, 0x00, 0xa3, 0x04, 0x29, 0x7b, 0xeb, 0x8a, 0xb6, 
0x55, 0x06, 0xd9, 0x56, 0x19, 0x4d, - 0xdf, 0x26, 0xe8, 0x19, 0x18, 0xd8, 0x71, 0xbd, 0xfa, 0xcc, 0x10, 0xa3, 0x75, 0x9e, 0xce, 0xf9, - 0x0d, 0xd7, 0xab, 0x1f, 0xec, 0xcf, 0x4d, 0x19, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0x7f, 0x6a, - 0xc1, 0x1c, 0x83, 0xad, 0xb8, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, - 0x9f, 0x05, 0x08, 0x49, 0x2d, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x15, 0x04, 0x6b, 0x58, - 0xf4, 0x40, 0x08, 0xb7, 0x9d, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x40, 0xa8, 0x4a, 0x00, 0x8e, - 0x71, 0x8c, 0x03, 0x21, 0xdf, 0xeb, 0x40, 0x40, 0x9f, 0x80, 0x89, 0xb8, 0xb1, 0xb0, 0xe5, 0xd4, - 0xe4, 0x00, 0xb2, 0x2d, 0x53, 0x35, 0x41, 0x38, 0x89, 0x6b, 0xff, 0x6d, 0x4b, 0x2c, 0x1e, 0xfa, - 0xd5, 0xef, 0xf3, 0x6f, 0xb5, 0x7f, 0xc9, 0x82, 0xe1, 0x45, 0xd7, 0xab, 0xbb, 0xde, 0x16, 0xfa, - 0x1c, 0x14, 0xe8, 0xdd, 0x54, 0x77, 0x22, 0x47, 0x9c, 0x7b, 0x1f, 0xd3, 0xf6, 0x96, 0xba, 0x2a, - 0xe6, 0x5b, 0x3b, 0x5b, 0xb4, 0x20, 0x9c, 0xa7, 0xd8, 0x74, 0xb7, 0xdd, 0xda, 0x78, 0x9b, 0xd4, - 0xa2, 0x55, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x60, 0x28, 0x72, 0x82, - 0x2d, 0x12, 0x89, 0x03, 0x30, 0xf5, 0xa0, 0xe2, 0x35, 0x31, 0xdd, 0x91, 0xc4, 0xab, 0x91, 0xf8, - 0x5a, 0x58, 0x67, 0x55, 0xb1, 0x20, 0x61, 0xff, 0xd0, 0x30, 0x9c, 0x5d, 0xaa, 0x96, 0x33, 0xd6, - 0xd5, 0x65, 0x18, 0xaa, 0x07, 0xee, 0x2e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x25, 0x56, 0x8a, 0x05, - 0x14, 0xbd, 0x04, 0xa3, 0xfc, 0x42, 0xba, 0xee, 0x78, 0xf5, 0x86, 0x1c, 0xe2, 0x53, 0x02, 0x7b, - 0xf4, 0x8e, 0x06, 0xc3, 0x06, 0xe6, 0x21, 0x17, 0xd5, 0xe5, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, - 0x68, 0xc1, 0x24, 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x1d, 0x91, 0x70, 0x66, 0x90, 0x9d, - 0x74, 0x4b, 0x69, 0xa3, 0x95, 0x39, 0x02, 0xf3, 0x77, 0x12, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, - 0x9d, 0x4c, 0x82, 0x71, 0x47, 0xb3, 0xe8, 0x3b, 0x2d, 0x98, 0xad, 0xf9, 0x5e, 0x14, 0xf8, 0x8d, - 0x06, 0x09, 0x2a, 0xed, 0x8d, 0x86, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, - 0xe6, 0x50, 0x21, 0x89, 0x39, 0xbc, 0xf0, 0x60, 0x7f, 0x6e, 0x76, 0x29, 0x93, 0x14, 0xee, 0xd2, - 0x0c, 0xda, 0x01, 0x44, 0xaf, 0xd2, 0x6a, 0xe4, 0x6c, 0x91, 0xb8, 0xf1, 0xe1, 0xfe, 0x1b, 0x3f, - 0xf3, 0x60, 0x7f, 0x0e, 0xad, 0x75, 0x90, 0xc0, 0x29, 0x64, 0xd1, 0x3b, 0x70, 0x8a, 0x96, 0x76, - 0x7c, 0x6b, 0xa1, 0xff, 0xe6, 0x66, 0x1e, 0xec, 0xcf, 0x9d, 0x5a, 0x4b, 0x21, 0x82, 0x53, 0x49, - 0xa3, 0xef, 0xb0, 0xe0, 0x6c, 0xfc, 0xf9, 0xcb, 0xf7, 0x5b, 0x8e, 0x57, 0x8f, 0x1b, 0x2e, 0xf6, - 0xdf, 0x30, 0x3d, 0x93, 0xcf, 0x2e, 0x65, 0x51, 0xc2, 0xd9, 0x8d, 0xcc, 0x2e, 0xc1, 0xe9, 0xd4, - 0xd5, 0x82, 0x26, 0x21, 0xbf, 0x43, 0x38, 0x17, 0x54, 0xc4, 0xf4, 0x27, 0x3a, 0x05, 0x83, 0xbb, - 0x4e, 0xa3, 0x2d, 0x36, 0x0a, 0xe6, 0x7f, 0x5e, 0xce, 0xbd, 0x64, 0xd9, 0xff, 0x22, 0x0f, 0x13, - 0x4b, 0xd5, 0xf2, 0x43, 0xed, 0x42, 0xfd, 0x1a, 0xca, 0x75, 0xbd, 0x86, 0xe2, 0x4b, 0x2d, 0x9f, - 0x79, 0xa9, 0xfd, 0x7f, 0x29, 0x5b, 0x68, 0x80, 0x6d, 0xa1, 0x6f, 0xc9, 0xd8, 0x42, 0x47, 0xbc, - 0x71, 0x76, 0x33, 0x56, 0xd1, 0x20, 0x9b, 0xcc, 0x54, 0x8e, 0xe5, 0xa6, 0x5f, 0x73, 0x1a, 0xc9, - 0xa3, 0xef, 0x90, 0x4b, 0xe9, 0x68, 0xe6, 0xb1, 0x06, 0xa3, 0x4b, 0x4e, 0xcb, 0xd9, 0x70, 0x1b, - 0x6e, 0xe4, 0x92, 0x10, 0x3d, 0x0e, 0x79, 0xa7, 0x5e, 0x67, 0xdc, 0x56, 0x71, 0xf1, 0xf4, 0x83, - 0xfd, 0xb9, 0xfc, 0x42, 0x9d, 0x5e, 0xfb, 0xa0, 0xb0, 0xf6, 0x30, 0xc5, 0x40, 0x1f, 0x85, 0x81, - 0x7a, 0xe0, 0xb7, 0x66, 0x72, 0x0c, 0x93, 0xee, 0xba, 0x81, 0x52, 0xe0, 0xb7, 0x12, 0xa8, 0x0c, - 0xc7, 0xfe, 0xb5, 0x1c, 0x9c, 0x5b, 0x22, 0xad, 0xed, 0x95, 0x6a, 0xc6, 0xf9, 0x7d, 
0x05, 0x0a, - 0x4d, 0xdf, 0x73, 0x23, 0x3f, 0x08, 0x45, 0xd3, 0x6c, 0x45, 0xac, 0x8a, 0x32, 0xac, 0xa0, 0xe8, - 0x22, 0x0c, 0xb4, 0x62, 0xa6, 0x72, 0x54, 0x32, 0xa4, 0x8c, 0x9d, 0x64, 0x10, 0x8a, 0xd1, 0x0e, - 0x49, 0x20, 0x56, 0x8c, 0xc2, 0xb8, 0x1d, 0x92, 0x00, 0x33, 0x48, 0x7c, 0x33, 0xd3, 0x3b, 0x5b, - 0x9c, 0xd0, 0x89, 0x9b, 0x99, 0x42, 0xb0, 0x86, 0x85, 0x2a, 0x50, 0x0c, 0x13, 0x33, 0xdb, 0xd7, - 0x36, 0x1d, 0x63, 0x57, 0xb7, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0xa3, 0x0c, 0xf5, 0xbc, 0xba, 0xbf, - 0x92, 0x03, 0xc4, 0x87, 0xf0, 0x1b, 0x6c, 0xe0, 0x6e, 0x77, 0x0e, 0x5c, 0xff, 0x5b, 0xe2, 0xa8, - 0x46, 0xef, 0xcf, 0x2c, 0x38, 0xb7, 0xe4, 0x7a, 0x75, 0x12, 0x64, 0x2c, 0xc0, 0xe3, 0x79, 0xcb, - 0x1e, 0x8e, 0x69, 0x30, 0x96, 0xd8, 0xc0, 0x11, 0x2c, 0x31, 0xfb, 0x8f, 0x2d, 0x40, 0xfc, 0xb3, - 0xdf, 0x77, 0x1f, 0x7b, 0xbb, 0xf3, 0x63, 0x8f, 0x60, 0x59, 0xd8, 0x37, 0x61, 0x7c, 0xa9, 0xe1, - 0x12, 0x2f, 0x2a, 0x57, 0x96, 0x7c, 0x6f, 0xd3, 0xdd, 0x42, 0x2f, 0xc3, 0x78, 0xe4, 0x36, 0x89, - 0xdf, 0x8e, 0xaa, 0xa4, 0xe6, 0x7b, 0xec, 0x25, 0x69, 0x5d, 0x19, 0x5c, 0x44, 0x0f, 0xf6, 0xe7, - 0xc6, 0xd7, 0x0d, 0x08, 0x4e, 0x60, 0xda, 0xbf, 0x4b, 0xc7, 0xcf, 0x6f, 0xb6, 0x7c, 0x8f, 0x78, - 0xd1, 0x92, 0xef, 0xd5, 0xb9, 0xc4, 0xe1, 0x65, 0x18, 0x88, 0xe8, 0x78, 0xf0, 0xb1, 0xbb, 0x2c, - 0x37, 0x0a, 0x1d, 0x85, 0x83, 0xfd, 0xb9, 0x33, 0x9d, 0x35, 0xd8, 0x38, 0xb1, 0x3a, 0xe8, 0x5b, - 0x60, 0x28, 0x8c, 0x9c, 0xa8, 0x1d, 0x8a, 0xd1, 0x7c, 0x4c, 0x8e, 0x66, 0x95, 0x95, 0x1e, 0xec, - 0xcf, 0x4d, 0xa8, 0x6a, 0xbc, 0x08, 0x8b, 0x0a, 0xe8, 0x09, 0x18, 0x6e, 0x92, 0x30, 0x74, 0xb6, - 0xe4, 0x6d, 0x38, 0x21, 0xea, 0x0e, 0xaf, 0xf2, 0x62, 0x2c, 0xe1, 0xe8, 0x12, 0x0c, 0x92, 0x20, - 0xf0, 0x03, 0xb1, 0x47, 0xc7, 0x04, 0xe2, 0xe0, 0x32, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x5b, 0x0b, - 0x26, 0x54, 0x5f, 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0x78, 0x13, 0xa0, 0x26, 0x3f, 0x30, 0x64, 0xb7, - 0xc7, 0xc8, 0xb3, 0x97, 0x53, 0x2f, 0xea, 0x8e, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b, 0xd4, - 0xec, 0x7f, 0x6c, 0xc1, 0x74, 0xe2, 0x8b, 0x6e, 0xba, 0x61, 0x84, 0xde, 0xea, 0xf8, 0xaa, 0xf9, - 0xfe, 0xbe, 0x8a, 0xd6, 0x66, 0xdf, 0xa4, 0x96, 0xb2, 0x2c, 0xd1, 0xbe, 0xe8, 0x3a, 0x0c, 0xba, - 0x11, 0x69, 0xca, 0x8f, 0xb9, 0xd4, 0xf5, 0x63, 0x78, 0xaf, 0xe2, 0x19, 0x29, 0xd3, 0x9a, 0x98, - 0x13, 0xb0, 0x7f, 0x2d, 0x0f, 0x45, 0xbe, 0x6c, 0x57, 0x9d, 0xd6, 0x09, 0xcc, 0xc5, 0x93, 0x50, - 0x74, 0x9b, 0xcd, 0x76, 0xe4, 0x6c, 0x88, 0xe3, 0xbc, 0xc0, 0xb7, 0x56, 0x59, 0x16, 0xe2, 0x18, - 0x8e, 0xca, 0x30, 0xc0, 0xba, 0xc2, 0xbf, 0xf2, 0xf1, 0xf4, 0xaf, 0x14, 0x7d, 0x9f, 0x2f, 0x39, - 0x91, 0xc3, 0x39, 0x29, 0x75, 0x8f, 0xd0, 0x22, 0xcc, 0x48, 0x20, 0x07, 0x60, 0xc3, 0xf5, 0x9c, - 0x60, 0x8f, 0x96, 0xcd, 0xe4, 0x19, 0xc1, 0xa7, 0xbb, 0x13, 0x5c, 0x54, 0xf8, 0x9c, 0xac, 0xfa, - 0xb0, 0x18, 0x80, 0x35, 0xa2, 0xb3, 0x2f, 0x42, 0x51, 0x21, 0x1f, 0x86, 0x21, 0x9a, 0xfd, 0x04, - 0x4c, 0x24, 0xda, 0xea, 0x55, 0x7d, 0x54, 0xe7, 0xa7, 0x7e, 0x99, 0x1d, 0x19, 0xa2, 0xd7, 0xcb, - 0xde, 0xae, 0x38, 0x72, 0xdf, 0x85, 0x53, 0x8d, 0x94, 0x93, 0x4c, 0xcc, 0x6b, 0xff, 0x27, 0xdf, - 0x39, 0xf1, 0xd9, 0xa7, 0xd2, 0xa0, 0x38, 0xb5, 0x0d, 0xca, 0x23, 0xf8, 0x2d, 0xba, 0x41, 0x9c, - 0x86, 0xce, 0x6e, 0xdf, 0x12, 0x65, 0x58, 0x41, 0xe9, 0x79, 0x77, 0x4a, 0x75, 0xfe, 0x06, 0xd9, - 0xab, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x75, 0xed, 0xfe, 0x79, 0x3e, 0xfa, 0xfc, 0xb8, 0x1c, - 0x11, 0x04, 0xf2, 0x37, 0xc8, 0x1e, 0x9f, 0x0a, 0xfd, 0xeb, 0xf2, 0x5d, 0xbf, 0xee, 0x67, 0x2d, - 0x18, 0x53, 0x5f, 0x77, 0x02, 0xe7, 0xc2, 0xa2, 0x79, 0x2e, 0x9c, 0xef, 0xba, 0xc0, 0x33, 0x4e, - 0x84, 0xaf, 
0xe4, 0xe0, 0xac, 0xc2, 0xa1, 0x6f, 0x03, 0xfe, 0x47, 0xac, 0xaa, 0xab, 0x50, 0xf4, - 0x94, 0xd4, 0xca, 0x32, 0xc5, 0x45, 0xb1, 0xcc, 0x2a, 0xc6, 0xa1, 0x2c, 0x9e, 0x17, 0x8b, 0x96, - 0x46, 0x75, 0x71, 0xae, 0x10, 0xdd, 0x2e, 0x42, 0xbe, 0xed, 0xd6, 0xc5, 0x05, 0xf3, 0x31, 0x39, - 0xda, 0xb7, 0xcb, 0xa5, 0x83, 0xfd, 0xb9, 0xc7, 0xb2, 0x54, 0x09, 0xf4, 0x66, 0x0b, 0xe7, 0x6f, - 0x97, 0x4b, 0x98, 0x56, 0x46, 0x0b, 0x30, 0x21, 0xb5, 0x25, 0x77, 0x28, 0xbb, 0xe5, 0x7b, 0xe2, - 0x1e, 0x52, 0x32, 0x59, 0x6c, 0x82, 0x71, 0x12, 0x1f, 0x95, 0x60, 0x72, 0xa7, 0xbd, 0x41, 0x1a, - 0x24, 0xe2, 0x1f, 0x7c, 0x83, 0x70, 0x89, 0x65, 0x31, 0x7e, 0x99, 0xdd, 0x48, 0xc0, 0x71, 0x47, - 0x0d, 0xfb, 0x2f, 0xd8, 0x7d, 0x20, 0x46, 0xaf, 0x12, 0xf8, 0x74, 0x61, 0x51, 0xea, 0x5f, 0xcf, - 0xe5, 0xdc, 0xcf, 0xaa, 0xb8, 0x41, 0xf6, 0xd6, 0x7d, 0xca, 0x99, 0xa7, 0xaf, 0x0a, 0x63, 0xcd, - 0x0f, 0x74, 0x5d, 0xf3, 0x3f, 0x9f, 0x83, 0xd3, 0x6a, 0x04, 0x0c, 0x26, 0xf0, 0x1b, 0x7d, 0x0c, - 0x9e, 0x81, 0x91, 0x3a, 0xd9, 0x74, 0xda, 0x8d, 0x48, 0x89, 0xcf, 0x07, 0xb9, 0x0a, 0xa5, 0x14, - 0x17, 0x63, 0x1d, 0xe7, 0x10, 0xc3, 0xf6, 0x3f, 0x47, 0xd8, 0x45, 0x1c, 0x39, 0x74, 0x8d, 0xab, - 0x5d, 0x63, 0x65, 0xee, 0x9a, 0x4b, 0x30, 0xe8, 0x36, 0x29, 0x63, 0x96, 0x33, 0xf9, 0xad, 0x32, - 0x2d, 0xc4, 0x1c, 0x86, 0x3e, 0x02, 0xc3, 0x35, 0xbf, 0xd9, 0x74, 0xbc, 0x3a, 0xbb, 0xf2, 0x8a, - 0x8b, 0x23, 0x94, 0x77, 0x5b, 0xe2, 0x45, 0x58, 0xc2, 0xd0, 0x39, 0x18, 0x70, 0x82, 0x2d, 0x2e, - 0xc3, 0x28, 0x2e, 0x16, 0x68, 0x4b, 0x0b, 0xc1, 0x56, 0x88, 0x59, 0x29, 0x7d, 0x82, 0xdd, 0xf3, - 0x83, 0x1d, 0xd7, 0xdb, 0x2a, 0xb9, 0x81, 0xd8, 0x12, 0xea, 0x2e, 0xbc, 0xab, 0x20, 0x58, 0xc3, - 0x42, 0x2b, 0x30, 0xd8, 0xf2, 0x83, 0x28, 0x9c, 0x19, 0x62, 0xc3, 0xfd, 0x58, 0xc6, 0x41, 0xc4, - 0xbf, 0xb6, 0xe2, 0x07, 0x51, 0xfc, 0x01, 0xf4, 0x5f, 0x88, 0x79, 0x75, 0x74, 0x13, 0x86, 0x89, - 0xb7, 0xbb, 0x12, 0xf8, 0xcd, 0x99, 0xe9, 0x6c, 0x4a, 0xcb, 0x1c, 0x85, 0x2f, 0xb3, 0x98, 0x47, - 0x15, 0xc5, 0x58, 0x92, 0x40, 0xdf, 0x02, 0x79, 0xe2, 0xed, 0xce, 0x0c, 0x33, 0x4a, 0xb3, 0x19, - 0x94, 0xee, 0x38, 0x41, 0x7c, 0xe6, 0x2f, 0x7b, 0xbb, 0x98, 0xd6, 0x41, 0x9f, 0x86, 0xa2, 0x3c, - 0x30, 0x42, 0x21, 0xac, 0x4b, 0x5d, 0xb0, 0xf2, 0x98, 0xc1, 0xe4, 0x9d, 0xb6, 0x1b, 0x90, 0x26, - 0xf1, 0xa2, 0x30, 0x3e, 0x21, 0x25, 0x34, 0xc4, 0x31, 0x35, 0xf4, 0x69, 0x29, 0x21, 0x5e, 0xf5, - 0xdb, 0x5e, 0x14, 0xce, 0x14, 0x59, 0xf7, 0x52, 0x75, 0x77, 0x77, 0x62, 0xbc, 0xa4, 0x08, 0x99, - 0x57, 0xc6, 0x06, 0x29, 0xf4, 0x19, 0x18, 0xe3, 0xff, 0xb9, 0x06, 0x2c, 0x9c, 0x39, 0xcd, 0x68, - 0x5f, 0xcc, 0xa6, 0xcd, 0x11, 0x17, 0x4f, 0x0b, 0xe2, 0x63, 0x7a, 0x69, 0x88, 0x4d, 0x6a, 0x08, - 0xc3, 0x58, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0x6f, 0x90, 0x19, 0x60, 0x03, 0x73, - 0x36, 0x5d, 0x63, 0xe6, 0x6f, 0x90, 0xc5, 0x29, 0x4a, 0xf3, 0xa6, 0x5e, 0x07, 0x9b, 0x24, 0xd0, - 0x6d, 0x18, 0xa7, 0x2f, 0x36, 0x37, 0x26, 0x3a, 0xd2, 0x8b, 0x28, 0x7b, 0x57, 0x61, 0xa3, 0x12, - 0x4e, 0x10, 0x41, 0xb7, 0x60, 0x34, 0x8c, 0x9c, 0x20, 0x6a, 0xb7, 0x38, 0xd1, 0x33, 0xbd, 0x88, - 0x32, 0x85, 0x6b, 0x55, 0xab, 0x82, 0x0d, 0x02, 0xe8, 0x75, 0x28, 0x36, 0xdc, 0x4d, 0x52, 0xdb, - 0xab, 0x35, 0xc8, 0xcc, 0x28, 0xa3, 0x96, 0x7a, 0xa8, 0xdc, 0x94, 0x48, 0x9c, 0xcf, 0x55, 0x7f, - 0x71, 0x5c, 0x1d, 0xdd, 0x81, 0x33, 0x11, 0x09, 0x9a, 0xae, 0xe7, 0xd0, 0xc3, 0x40, 0x3c, 0xad, - 0x98, 0x22, 0x73, 0x8c, 0xed, 0xb6, 0x0b, 0x62, 0x36, 0xce, 0xac, 0xa7, 0x62, 0xe1, 0x8c, 0xda, - 0xe8, 0x3e, 0xcc, 0xa4, 0x40, 0xfc, 0x86, 0x5b, 0xdb, 0x9b, 0x39, 0xc5, 0x28, 0xbf, 0x2a, 0x28, - 0xcf, 0xac, 0x67, 0xe0, 0x1d, 0x74, 
0x81, 0xe1, 0x4c, 0xea, 0xe8, 0x16, 0x4c, 0xb0, 0x13, 0xa8, - 0xd2, 0x6e, 0x34, 0x44, 0x83, 0xe3, 0xac, 0xc1, 0x8f, 0xc8, 0xfb, 0xb8, 0x6c, 0x82, 0x0f, 0xf6, - 0xe7, 0x20, 0xfe, 0x87, 0x93, 0xb5, 0xd1, 0x06, 0xd3, 0x99, 0xb5, 0x03, 0x37, 0xda, 0xa3, 0xe7, - 0x06, 0xb9, 0x1f, 0xcd, 0x4c, 0x74, 0x95, 0x57, 0xe8, 0xa8, 0x4a, 0xb1, 0xa6, 0x17, 0xe2, 0x24, - 0x41, 0x7a, 0xa4, 0x86, 0x51, 0xdd, 0xf5, 0x66, 0x26, 0xf9, 0xbb, 0x44, 0x9e, 0x48, 0x55, 0x5a, - 0x88, 0x39, 0x8c, 0xe9, 0xcb, 0xe8, 0x8f, 0x5b, 0xf4, 0xe6, 0x9a, 0x62, 0x88, 0xb1, 0xbe, 0x4c, - 0x02, 0x70, 0x8c, 0x43, 0x99, 0xc9, 0x28, 0xda, 0x9b, 0x41, 0x0c, 0x55, 0x1d, 0x2c, 0xeb, 0xeb, - 0x9f, 0xc6, 0xb4, 0xdc, 0xde, 0x80, 0x71, 0x75, 0x10, 0xb2, 0x31, 0x41, 0x73, 0x30, 0xc8, 0xd8, - 0x27, 0x21, 0x5d, 0x2b, 0xd2, 0x2e, 0x30, 0xd6, 0x0a, 0xf3, 0x72, 0xd6, 0x05, 0xf7, 0x5d, 0xb2, - 0xb8, 0x17, 0x11, 0xfe, 0xa6, 0xcf, 0x6b, 0x5d, 0x90, 0x00, 0x1c, 0xe3, 0xd8, 0xff, 0x87, 0xb3, - 0xa1, 0xf1, 0x69, 0xdb, 0xc7, 0xfd, 0xf2, 0x14, 0x14, 0xb6, 0xfd, 0x30, 0xa2, 0xd8, 0xac, 0x8d, - 0xc1, 0x98, 0xf1, 0xbc, 0x2e, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc0, 0x58, 0x4d, 0x6f, 0x40, 0x5c, - 0x8e, 0xea, 0x18, 0x31, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x09, 0x0a, 0xcc, 0x06, 0xa4, 0xe6, 0x37, - 0x04, 0xd7, 0x26, 0x6f, 0xf8, 0x42, 0x45, 0x94, 0x1f, 0x68, 0xbf, 0xb1, 0xc2, 0x46, 0x97, 0x61, - 0x88, 0x76, 0xa1, 0x5c, 0x11, 0xd7, 0x92, 0x12, 0x14, 0x5d, 0x67, 0xa5, 0x58, 0x40, 0xed, 0xbf, - 0x9c, 0xd3, 0x46, 0x99, 0xbe, 0x87, 0x09, 0xaa, 0xc0, 0xf0, 0x3d, 0xc7, 0x8d, 0x5c, 0x6f, 0x4b, - 0xf0, 0x1f, 0x4f, 0x74, 0xbd, 0xa3, 0x58, 0xa5, 0xbb, 0xbc, 0x02, 0xbf, 0x45, 0xc5, 0x1f, 0x2c, - 0xc9, 0x50, 0x8a, 0x41, 0xdb, 0xf3, 0x28, 0xc5, 0x5c, 0xbf, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, - 0x3f, 0x58, 0x92, 0x41, 0x6f, 0x01, 0xc8, 0x1d, 0x46, 0xea, 0xc2, 0xf6, 0xe2, 0xa9, 0xde, 0x44, - 0xd7, 0x55, 0x9d, 0xc5, 0x71, 0x7a, 0x47, 0xc7, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x18, 0x9f, 0xd6, - 0xd9, 0x19, 0xf4, 0x6d, 0x74, 0x89, 0x3b, 0x41, 0x44, 0xea, 0x0b, 0x91, 0x18, 0x9c, 0x8f, 0xf6, - 0xf7, 0x48, 0x59, 0x77, 0x9b, 0x44, 0xdf, 0x0e, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x98, 0x87, - 0x99, 0xac, 0xee, 0xd2, 0x45, 0x47, 0xee, 0xbb, 0xd1, 0x12, 0x65, 0xaf, 0x2c, 0x73, 0xd1, 0x2d, - 0x8b, 0x72, 0xac, 0x30, 0xe8, 0xec, 0x87, 0xee, 0x96, 0x7c, 0x63, 0x0e, 0xc6, 0xb3, 0x5f, 0x65, - 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x40, 0x9c, 0x50, 0x18, 0xf7, 0x68, 0xab, 0x04, 0xb3, 0x52, 0x2c, - 0xa0, 0xba, 0xb4, 0x6b, 0xa0, 0x87, 0xb4, 0xcb, 0x18, 0xa2, 0xc1, 0xa3, 0x1d, 0x22, 0xf4, 0x59, - 0x80, 0x4d, 0xd7, 0x73, 0xc3, 0x6d, 0x46, 0x7d, 0xe8, 0xd0, 0xd4, 0x15, 0x73, 0xb6, 0xa2, 0xa8, - 0x60, 0x8d, 0x22, 0x7a, 0x01, 0x46, 0xd4, 0x06, 0x2c, 0x97, 0x98, 0xa6, 0x53, 0xb3, 0x1c, 0x89, - 0x4f, 0xa3, 0x12, 0xd6, 0xf1, 0xec, 0xb7, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x7e, - 0xc7, 0x37, 0xd7, 0x7d, 0x7c, 0xed, 0xaf, 0xe5, 0x61, 0xc2, 0x68, 0xac, 0x1d, 0xf6, 0x71, 0x66, - 0x5d, 0xa3, 0x07, 0xb8, 0x13, 0x11, 0xb1, 0xff, 0xec, 0xde, 0x5b, 0x45, 0x3f, 0xe4, 0xe9, 0x0e, - 0xe0, 0xf5, 0xd1, 0x67, 0xa1, 0xd8, 0x70, 0x42, 0x26, 0x39, 0x23, 0x62, 0xdf, 0xf5, 0x43, 0x2c, - 0x7e, 0x98, 0x38, 0x61, 0xa4, 0xdd, 0x9a, 0x9c, 0x76, 0x4c, 0x92, 0xde, 0x34, 0x94, 0x3f, 0x91, - 0xd6, 0x63, 0xaa, 0x13, 0x94, 0x89, 0xd9, 0xc3, 0x1c, 0x86, 0x5e, 0x82, 0xd1, 0x80, 0xb0, 0x55, - 0xb1, 0x44, 0xb9, 0x39, 0xb6, 0xcc, 0x06, 0x63, 0xb6, 0x0f, 0x6b, 0x30, 0x6c, 0x60, 0xc6, 0x6f, - 0x83, 0xa1, 0x2e, 0x6f, 0x83, 0x27, 0x60, 0x98, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0x46, 0x99, 0x17, - 0x63, 0x09, 0x4f, 0x2e, 0x98, 0x42, 0x7f, 0x0b, 0x86, 0xbe, 
0x3e, 0xc4, 0xa2, 0x66, 0x5a, 0xe6, - 0x02, 0x3f, 0xe5, 0xc4, 0x92, 0xc7, 0x12, 0x66, 0x7f, 0x14, 0xc6, 0x4b, 0x0e, 0x69, 0xfa, 0xde, - 0xb2, 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x19, 0x18, 0x60, 0x97, 0x08, 0x3f, 0x02, 0x06, 0x68, - 0x43, 0x78, 0x80, 0x3e, 0x08, 0xec, 0x2d, 0x38, 0x5d, 0xf2, 0xef, 0x79, 0xf7, 0x9c, 0xa0, 0xbe, - 0x50, 0x29, 0x6b, 0xef, 0xeb, 0x35, 0xf9, 0xbe, 0xe3, 0x46, 0x5b, 0xa9, 0x47, 0xaf, 0x56, 0x93, - 0xb3, 0xb5, 0x2b, 0x6e, 0x83, 0x64, 0x48, 0x41, 0xfe, 0x6a, 0xce, 0x68, 0x29, 0xc6, 0x57, 0x5a, - 0x2d, 0x2b, 0x53, 0xab, 0xf5, 0x06, 0x14, 0x36, 0x5d, 0xd2, 0xa8, 0x63, 0xb2, 0x29, 0x56, 0xe2, - 0xe3, 0xd9, 0x76, 0x28, 0x2b, 0x14, 0x53, 0x4a, 0xbd, 0xf8, 0xeb, 0x70, 0x45, 0x54, 0xc6, 0x8a, - 0x0c, 0xda, 0x81, 0x49, 0xf9, 0x60, 0x90, 0x50, 0xb1, 0x2e, 0x9f, 0xe8, 0xf6, 0x0a, 0x31, 0x89, - 0x9f, 0x7a, 0xb0, 0x3f, 0x37, 0x89, 0x13, 0x64, 0x70, 0x07, 0x61, 0xfa, 0x1c, 0x6c, 0xd2, 0x13, - 0x78, 0x80, 0x0d, 0x3f, 0x7b, 0x0e, 0xb2, 0x97, 0x2d, 0x2b, 0xb5, 0x7f, 0xcc, 0x82, 0x47, 0x3a, - 0x46, 0x46, 0xbc, 0xf0, 0x8f, 0x78, 0x16, 0x92, 0x2f, 0xee, 0x5c, 0xef, 0x17, 0xb7, 0xfd, 0x77, - 0x2c, 0x38, 0xb5, 0xdc, 0x6c, 0x45, 0x7b, 0x25, 0xd7, 0x54, 0x41, 0xbd, 0x08, 0x43, 0x4d, 0x52, - 0x77, 0xdb, 0x4d, 0x31, 0x73, 0x73, 0xf2, 0x94, 0x5a, 0x65, 0xa5, 0x07, 0xfb, 0x73, 0x63, 0xd5, - 0xc8, 0x0f, 0x9c, 0x2d, 0xc2, 0x0b, 0xb0, 0x40, 0x67, 0x67, 0xbd, 0xfb, 0x2e, 0xb9, 0xe9, 0x36, - 0x5d, 0x69, 0x57, 0xd4, 0x55, 0x66, 0x37, 0x2f, 0x07, 0x74, 0xfe, 0x8d, 0xb6, 0xe3, 0x45, 0x6e, - 0xb4, 0x27, 0xb4, 0x47, 0x92, 0x08, 0x8e, 0xe9, 0xd9, 0x5f, 0xb5, 0x60, 0x42, 0xae, 0xfb, 0x85, - 0x7a, 0x3d, 0x20, 0x61, 0x88, 0x66, 0x21, 0xe7, 0xb6, 0x44, 0x2f, 0x41, 0xf4, 0x32, 0x57, 0xae, - 0xe0, 0x9c, 0xdb, 0x92, 0x6c, 0x19, 0x3b, 0x08, 0xf3, 0xa6, 0x22, 0xed, 0xba, 0x28, 0xc7, 0x0a, - 0x03, 0x5d, 0x81, 0x82, 0xe7, 0xd7, 0xb9, 0x6d, 0x17, 0xbf, 0xd2, 0xd8, 0x02, 0x5b, 0x13, 0x65, - 0x58, 0x41, 0x51, 0x05, 0x8a, 0xdc, 0xec, 0x29, 0x5e, 0xb4, 0x7d, 0x19, 0x4f, 0xb1, 0x2f, 0x5b, - 0x97, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0x55, 0x0b, 0x46, 0xe5, 0x97, 0xf5, 0xc9, 0x73, 0xd2, 0xad, - 0x15, 0xf3, 0x9b, 0xf1, 0xd6, 0xa2, 0x3c, 0x23, 0x83, 0x18, 0xac, 0x62, 0xfe, 0x50, 0xac, 0xe2, - 0x33, 0x30, 0xe2, 0xb4, 0x5a, 0x15, 0x93, 0xcf, 0x64, 0x4b, 0x69, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, - 0xfd, 0xa3, 0x39, 0x18, 0x97, 0x5f, 0x50, 0x6d, 0x6f, 0x84, 0x24, 0x42, 0xeb, 0x50, 0x74, 0xf8, - 0x2c, 0x11, 0xb9, 0xc8, 0x2f, 0xa5, 0xcb, 0x11, 0x8c, 0x29, 0x8d, 0x2f, 0xfc, 0x05, 0x59, 0x1b, - 0xc7, 0x84, 0x50, 0x03, 0xa6, 0x3c, 0x3f, 0x62, 0x87, 0xbf, 0x82, 0x77, 0x53, 0xed, 0x24, 0xa9, - 0x9f, 0x15, 0xd4, 0xa7, 0xd6, 0x92, 0x54, 0x70, 0x27, 0x61, 0xb4, 0x2c, 0x65, 0x33, 0xf9, 0x6c, - 0x61, 0x80, 0x3e, 0x71, 0xe9, 0xa2, 0x19, 0xfb, 0x57, 0x2c, 0x28, 0x4a, 0xb4, 0x93, 0xd0, 0xe2, - 0xad, 0xc2, 0x70, 0xc8, 0x26, 0x41, 0x0e, 0x8d, 0xdd, 0xad, 0xe3, 0x7c, 0xbe, 0xe2, 0x3b, 0x8d, - 0xff, 0x0f, 0xb1, 0xa4, 0xc1, 0x44, 0xf3, 0xaa, 0xfb, 0xef, 0x13, 0xd1, 0xbc, 0xea, 0x4f, 0xc6, - 0xa5, 0xf4, 0x87, 0xac, 0xcf, 0x9a, 0xac, 0x8b, 0xb2, 0x5e, 0xad, 0x80, 0x6c, 0xba, 0xf7, 0x93, - 0xac, 0x57, 0x85, 0x95, 0x62, 0x01, 0x45, 0x6f, 0xc1, 0x68, 0x4d, 0xca, 0x64, 0xe3, 0x1d, 0x7e, - 0xb9, 0xab, 0x7e, 0x40, 0xa9, 0x92, 0xb8, 0x2c, 0x64, 0x49, 0xab, 0x8f, 0x0d, 0x6a, 0xa6, 0x19, - 0x41, 0xbe, 0x97, 0x19, 0x41, 0x4c, 0x37, 0x5b, 0xa9, 0xfe, 0xe3, 0x16, 0x0c, 0x71, 0x59, 0x5c, - 0x7f, 0xa2, 0x50, 0x4d, 0xb3, 0x16, 0x8f, 0xdd, 0x1d, 0x5a, 0x28, 0x34, 0x65, 0x68, 0x15, 0x8a, - 0xec, 0x07, 0x93, 0x25, 0xe6, 0xb3, 0xad, 0xee, 0x79, 0xab, 0x7a, 0x07, 0xef, 0xc8, 
[old gzipped FileDescriptorProto byte array elided: the "-"-prefixed hex literal lines removed from the regenerated vendored .pb.go file are not reproduced here]
+	// 13999 bytes of a gzipped FileDescriptorProto
[regenerated gzipped FileDescriptorProto byte array elided: the "+"-prefixed hex literal lines of the new descriptor continue]
0x30, 0x3f, 0x6d, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0xa9, 0x05, 0xf3, 0x0c, 0xb6, + 0xea, 0x36, 0x49, 0x95, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x0e, 0x20, 0x24, + 0xf5, 0x80, 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x53, 0x10, 0xac, 0x61, 0xd1, 0x03, 0x21, 0xdc, + 0x71, 0x02, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x26, 0x01, 0x38, 0xc6, 0x31, 0x0e, 0x84, + 0x62, 0xaf, 0x03, 0x01, 0x7d, 0x0c, 0x26, 0xe3, 0xc6, 0xc2, 0xb6, 0x53, 0x97, 0x03, 0xc8, 0xb6, + 0x4c, 0xcd, 0x04, 0xe1, 0x24, 0xae, 0xfd, 0xf7, 0x2c, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xcb, 0xbf, + 0xd5, 0xfe, 0x45, 0x0b, 0x86, 0x97, 0x5c, 0xaf, 0xe1, 0x7a, 0xdb, 0xe8, 0xb3, 0x30, 0x42, 0xef, + 0xa6, 0x86, 0x13, 0x39, 0xe2, 0xdc, 0xfb, 0x90, 0xb6, 0xb7, 0xd4, 0x55, 0xb1, 0xd0, 0xde, 0xdd, + 0xa6, 0x05, 0xe1, 0x02, 0xc5, 0xa6, 0xbb, 0xed, 0xe6, 0xe6, 0x9b, 0xa4, 0x1e, 0xad, 0x91, 0xc8, + 0x89, 0x3f, 0x27, 0x2e, 0xc3, 0x8a, 0x2a, 0xba, 0x0e, 0x43, 0x91, 0x13, 0x6c, 0x93, 0x48, 0x1c, + 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x5e, 0x9d, 0xc4, 0xd7, 0xc2, 0x06, 0xab, + 0x8a, 0x05, 0x09, 0xfb, 0x87, 0x86, 0xe1, 0xec, 0x72, 0xad, 0x92, 0xb3, 0xae, 0x2e, 0xc2, 0x50, + 0x23, 0x70, 0xf7, 0x48, 0x20, 0xc6, 0x59, 0x51, 0x29, 0xb3, 0x52, 0x2c, 0xa0, 0xe8, 0x25, 0x18, + 0xe3, 0x17, 0xd2, 0x35, 0xc7, 0x6b, 0x34, 0xe5, 0x10, 0x9f, 0x12, 0xd8, 0x63, 0xb7, 0x35, 0x18, + 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x26, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x05, 0x0b, 0xa6, 0x78, + 0x33, 0x8b, 0x51, 0x14, 0xb8, 0x9b, 0x9d, 0x88, 0x84, 0xb3, 0x83, 0xec, 0xa4, 0x5b, 0xce, 0x1a, + 0xad, 0xdc, 0x11, 0x58, 0xb8, 0x9d, 0xa0, 0xc2, 0x0f, 0xc1, 0x59, 0xd1, 0xee, 0x54, 0x12, 0x8c, + 0x53, 0xcd, 0xa2, 0xef, 0xb4, 0x60, 0xae, 0xee, 0x7b, 0x51, 0xe0, 0x37, 0x9b, 0x24, 0xa8, 0x76, + 0x36, 0x9b, 0x6e, 0xb8, 0xc3, 0xd7, 0x29, 0x26, 0x5b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, + 0xe6, 0xf0, 0xfc, 0xfd, 0x83, 0xf9, 0xb9, 0xe5, 0x5c, 0x52, 0xb8, 0x4b, 0x33, 0x68, 0x17, 0x10, + 0xbd, 0x4a, 0x6b, 0x91, 0xb3, 0x4d, 0xe2, 0xc6, 0x87, 0xfb, 0x6f, 0xfc, 0xcc, 0xfd, 0x83, 0x79, + 0xb4, 0x9e, 0x22, 0x81, 0x33, 0xc8, 0xa2, 0xb7, 0xe0, 0x14, 0x2d, 0x4d, 0x7d, 0xeb, 0x48, 0xff, + 0xcd, 0xcd, 0xde, 0x3f, 0x98, 0x3f, 0xb5, 0x9e, 0x41, 0x04, 0x67, 0x92, 0x46, 0xdf, 0x61, 0xc1, + 0xd9, 0xf8, 0xf3, 0x57, 0xee, 0xb5, 0x1d, 0xaf, 0x11, 0x37, 0x5c, 0xea, 0xbf, 0x61, 0x7a, 0x26, + 0x9f, 0x5d, 0xce, 0xa3, 0x84, 0xf3, 0x1b, 0x99, 0x5b, 0x86, 0xd3, 0x99, 0xab, 0x05, 0x4d, 0x41, + 0x71, 0x97, 0x70, 0x2e, 0xa8, 0x84, 0xe9, 0x4f, 0x74, 0x0a, 0x06, 0xf7, 0x9c, 0x66, 0x47, 0x6c, + 0x14, 0xcc, 0xff, 0xbc, 0x5c, 0x78, 0xc9, 0xb2, 0xff, 0x75, 0x11, 0x26, 0x97, 0x6b, 0x95, 0x07, + 0xda, 0x85, 0xfa, 0x35, 0x54, 0xe8, 0x7a, 0x0d, 0xc5, 0x97, 0x5a, 0x31, 0xf7, 0x52, 0xfb, 0xcb, + 0x19, 0x5b, 0x68, 0x80, 0x6d, 0xa1, 0x6f, 0xc9, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0xf6, 0x72, 0x56, + 0xd1, 0x20, 0x9b, 0xcc, 0x4c, 0x8e, 0xe5, 0x86, 0x5f, 0x77, 0x9a, 0xc9, 0xa3, 0xef, 0x88, 0x4b, + 0xe9, 0x78, 0xe6, 0xb1, 0x0e, 0x63, 0xcb, 0x4e, 0xdb, 0xd9, 0x74, 0x9b, 0x6e, 0xe4, 0x92, 0x10, + 0x3d, 0x09, 0x45, 0xa7, 0xd1, 0x60, 0xdc, 0x56, 0x69, 0xe9, 0xf4, 0xfd, 0x83, 0xf9, 0xe2, 0x62, + 0x83, 0x5e, 0xfb, 0xa0, 0xb0, 0xf6, 0x31, 0xc5, 0x40, 0x1f, 0x84, 0x81, 0x46, 0xe0, 0xb7, 0x67, + 0x0b, 0x0c, 0x93, 0xee, 0xba, 0x81, 0x72, 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xd5, 0x02, + 0x3c, 0xb6, 0x4c, 0xda, 0x3b, 0xab, 0xb5, 0x9c, 0xf3, 0xfb, 0x12, 0x8c, 0xb4, 0x7c, 0xcf, 0x8d, + 0xfc, 0x20, 0x14, 0x4d, 0xb3, 0x15, 0xb1, 0x26, 0xca, 0xb0, 0x82, 0xa2, 0x0b, 0x30, 0xd0, 0x8e, + 0x99, 0xca, 0x31, 0xc9, 0x90, 
0x32, 0x76, 0x92, 0x41, 0x28, 0x46, 0x27, 0x24, 0x81, 0x58, 0x31, + 0x0a, 0xe3, 0x56, 0x48, 0x02, 0xcc, 0x20, 0xf1, 0xcd, 0x4c, 0xef, 0x6c, 0x71, 0x42, 0x27, 0x6e, + 0x66, 0x0a, 0xc1, 0x1a, 0x16, 0xaa, 0x42, 0x29, 0x4c, 0xcc, 0x6c, 0x5f, 0xdb, 0x74, 0x9c, 0x5d, + 0xdd, 0x6a, 0x26, 0x63, 0x22, 0xc6, 0x8d, 0x32, 0xd4, 0xf3, 0xea, 0xfe, 0x4a, 0x01, 0x10, 0x1f, + 0xc2, 0x6f, 0xb0, 0x81, 0xbb, 0x95, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4, 0x71, 0x8d, 0xde, 0x9f, 0x59, + 0xf0, 0xd8, 0xb2, 0xeb, 0x35, 0x48, 0x90, 0xb3, 0x00, 0x1f, 0xce, 0x5b, 0xf6, 0x68, 0x4c, 0x83, + 0xb1, 0xc4, 0x06, 0x8e, 0x61, 0x89, 0xd9, 0x7f, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xae, 0xfb, 0xd8, + 0x5b, 0xe9, 0x8f, 0x3d, 0x86, 0x65, 0x61, 0xdf, 0x80, 0x89, 0xe5, 0xa6, 0x4b, 0xbc, 0xa8, 0x52, + 0x5d, 0xf6, 0xbd, 0x2d, 0x77, 0x1b, 0xbd, 0x0c, 0x13, 0x91, 0xdb, 0x22, 0x7e, 0x27, 0xaa, 0x91, + 0xba, 0xef, 0xb1, 0x97, 0xa4, 0x75, 0x69, 0x70, 0x09, 0xdd, 0x3f, 0x98, 0x9f, 0xd8, 0x30, 0x20, + 0x38, 0x81, 0x69, 0xff, 0x2e, 0x1d, 0x3f, 0xbf, 0xd5, 0xf6, 0x3d, 0xe2, 0x45, 0xcb, 0xbe, 0xd7, + 0xe0, 0x12, 0x87, 0x97, 0x61, 0x20, 0xa2, 0xe3, 0xc1, 0xc7, 0xee, 0xa2, 0xdc, 0x28, 0x74, 0x14, + 0x0e, 0x0f, 0xe6, 0xcf, 0xa4, 0x6b, 0xb0, 0x71, 0x62, 0x75, 0xd0, 0xb7, 0xc0, 0x50, 0x18, 0x39, + 0x51, 0x27, 0x14, 0xa3, 0xf9, 0xb8, 0x1c, 0xcd, 0x1a, 0x2b, 0x3d, 0x3c, 0x98, 0x9f, 0x54, 0xd5, + 0x78, 0x11, 0x16, 0x15, 0xd0, 0x53, 0x30, 0xdc, 0x22, 0x61, 0xe8, 0x6c, 0xcb, 0xdb, 0x70, 0x52, + 0xd4, 0x1d, 0x5e, 0xe3, 0xc5, 0x58, 0xc2, 0xd1, 0x13, 0x30, 0x48, 0x82, 0xc0, 0x0f, 0xc4, 0x1e, + 0x1d, 0x17, 0x88, 0x83, 0x2b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x98, 0x54, 0x7d, 0xe5, + 0x6d, 0x9d, 0xc0, 0xab, 0xe0, 0x53, 0x00, 0x75, 0xf9, 0x81, 0x21, 0xbb, 0x3d, 0x46, 0x9f, 0xbb, + 0x98, 0x79, 0x51, 0xa7, 0x86, 0x31, 0xa6, 0xac, 0x8a, 0x42, 0xac, 0x51, 0xb3, 0xff, 0x99, 0x05, + 0x33, 0x89, 0x2f, 0xba, 0xe1, 0x86, 0x11, 0x7a, 0x23, 0xf5, 0x55, 0x0b, 0xfd, 0x7d, 0x15, 0xad, + 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0x74, 0x23, 0xd2, 0x92, 0x1f, + 0xf3, 0x44, 0xd7, 0x8f, 0xe1, 0xbd, 0x8a, 0x67, 0xa4, 0x42, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, 0xd5, + 0x22, 0x94, 0xf8, 0xb2, 0x5d, 0x73, 0xda, 0x27, 0x30, 0x17, 0x4f, 0x43, 0xc9, 0x6d, 0xb5, 0x3a, + 0x91, 0xb3, 0x29, 0x8e, 0xf3, 0x11, 0xbe, 0xb5, 0x2a, 0xb2, 0x10, 0xc7, 0x70, 0x54, 0x81, 0x01, + 0xd6, 0x15, 0xfe, 0x95, 0x4f, 0x66, 0x7f, 0xa5, 0xe8, 0xfb, 0x42, 0xd9, 0x89, 0x1c, 0xce, 0x49, + 0xa9, 0x7b, 0x84, 0x16, 0x61, 0x46, 0x02, 0x39, 0x00, 0x9b, 0xae, 0xe7, 0x04, 0xfb, 0xb4, 0x6c, + 0xb6, 0xc8, 0x08, 0x3e, 0xdb, 0x9d, 0xe0, 0x92, 0xc2, 0xe7, 0x64, 0xd5, 0x87, 0xc5, 0x00, 0xac, + 0x11, 0x9d, 0xfb, 0x08, 0x94, 0x14, 0xf2, 0x51, 0x18, 0xa2, 0xb9, 0x8f, 0xc1, 0x64, 0xa2, 0xad, + 0x5e, 0xd5, 0xc7, 0x74, 0x7e, 0xea, 0x97, 0xd8, 0x91, 0x21, 0x7a, 0xbd, 0xe2, 0xed, 0x89, 0x23, + 0xf7, 0x6d, 0x38, 0xd5, 0xcc, 0x38, 0xc9, 0xc4, 0xbc, 0xf6, 0x7f, 0xf2, 0x3d, 0x26, 0x3e, 0xfb, + 0x54, 0x16, 0x14, 0x67, 0xb6, 0x41, 0x79, 0x04, 0xbf, 0x4d, 0x37, 0x88, 0xd3, 0xd4, 0xd9, 0xed, + 0x9b, 0xa2, 0x0c, 0x2b, 0x28, 0x3d, 0xef, 0x4e, 0xa9, 0xce, 0x5f, 0x27, 0xfb, 0x35, 0xd2, 0x24, + 0xf5, 0xc8, 0x0f, 0xbe, 0xae, 0xdd, 0x3f, 0xc7, 0x47, 0x9f, 0x1f, 0x97, 0xa3, 0x82, 0x40, 0xf1, + 0x3a, 0xd9, 0xe7, 0x53, 0xa1, 0x7f, 0x5d, 0xb1, 0xeb, 0xd7, 0xfd, 0x8c, 0x05, 0xe3, 0xea, 0xeb, + 0x4e, 0xe0, 0x5c, 0x58, 0x32, 0xcf, 0x85, 0x73, 0x5d, 0x17, 0x78, 0xce, 0x89, 0xf0, 0x95, 0x02, + 0x9c, 0x55, 0x38, 0xf4, 0x6d, 0xc0, 0xff, 0x88, 0x55, 0x75, 0x19, 0x4a, 0x9e, 0x92, 0x5a, 0x59, + 0xa6, 0xb8, 0x28, 0x96, 0x59, 0xc5, 0x38, 0x94, 0xc5, 
0xf3, 0x62, 0xd1, 0xd2, 0x98, 0x2e, 0xce, + 0x15, 0xa2, 0xdb, 0x25, 0x28, 0x76, 0xdc, 0x86, 0xb8, 0x60, 0x3e, 0x24, 0x47, 0xfb, 0x56, 0xa5, + 0x7c, 0x78, 0x30, 0xff, 0x78, 0x9e, 0x2a, 0x81, 0xde, 0x6c, 0xe1, 0xc2, 0xad, 0x4a, 0x19, 0xd3, + 0xca, 0x68, 0x11, 0x26, 0xa5, 0xb6, 0xe4, 0x36, 0x65, 0xb7, 0x7c, 0x4f, 0xdc, 0x43, 0x4a, 0x26, + 0x8b, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, 0x32, 0x4c, 0xed, 0x76, 0x36, 0x49, 0x93, 0x44, 0xfc, 0x83, + 0xaf, 0x13, 0x2e, 0xb1, 0x2c, 0xc5, 0x2f, 0xb3, 0xeb, 0x09, 0x38, 0x4e, 0xd5, 0xb0, 0xff, 0x82, + 0xdd, 0x07, 0x62, 0xf4, 0xaa, 0x81, 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0xf5, 0x5c, 0xce, 0xfd, 0xac, + 0x8a, 0xeb, 0x64, 0x7f, 0xc3, 0xa7, 0x9c, 0x79, 0xf6, 0xaa, 0x30, 0xd6, 0xfc, 0x40, 0xd7, 0x35, + 0xff, 0x73, 0x05, 0x38, 0xad, 0x46, 0xc0, 0x60, 0x02, 0xbf, 0xd1, 0xc7, 0xe0, 0x0a, 0x8c, 0x36, + 0xc8, 0x96, 0xd3, 0x69, 0x46, 0x4a, 0x7c, 0x3e, 0xc8, 0x55, 0x28, 0xe5, 0xb8, 0x18, 0xeb, 0x38, + 0x47, 0x18, 0xb6, 0xff, 0x3d, 0xca, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, + 0xd7, 0x3c, 0x01, 0x83, 0x6e, 0x8b, 0x32, 0x66, 0x05, 0x93, 0xdf, 0xaa, 0xd0, 0x42, 0xcc, 0x61, + 0xe8, 0x03, 0x30, 0x5c, 0xf7, 0x5b, 0x2d, 0xc7, 0x6b, 0xb0, 0x2b, 0xaf, 0xb4, 0x34, 0x4a, 0x79, + 0xb7, 0x65, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x03, 0x4e, 0xb0, 0xcd, 0x65, 0x18, 0xa5, 0xa5, + 0x11, 0xda, 0xd2, 0x62, 0xb0, 0x1d, 0x62, 0x56, 0x4a, 0x9f, 0x60, 0x77, 0xfd, 0x60, 0xd7, 0xf5, + 0xb6, 0xcb, 0x6e, 0x20, 0xb6, 0x84, 0xba, 0x0b, 0xef, 0x28, 0x08, 0xd6, 0xb0, 0xd0, 0x2a, 0x0c, + 0xb6, 0xfd, 0x20, 0x0a, 0x67, 0x87, 0xd8, 0x70, 0x3f, 0x9e, 0x73, 0x10, 0xf1, 0xaf, 0xad, 0xfa, + 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1d, 0xdd, 0x80, 0x61, 0xe2, 0xed, 0xad, 0x06, + 0x7e, 0x6b, 0x76, 0x26, 0x9f, 0xd2, 0x0a, 0x47, 0xe1, 0xcb, 0x2c, 0xe6, 0x51, 0x45, 0x31, 0x96, + 0x24, 0xd0, 0xb7, 0x40, 0x91, 0x78, 0x7b, 0xb3, 0xc3, 0x8c, 0xd2, 0x5c, 0x0e, 0xa5, 0xdb, 0x4e, + 0x10, 0x9f, 0xf9, 0x2b, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0x27, 0xa1, 0x24, 0x0f, 0x8c, 0x50, 0x08, + 0xeb, 0x32, 0x17, 0xac, 0x3c, 0x66, 0x30, 0x79, 0xab, 0xe3, 0x06, 0xa4, 0x45, 0xbc, 0x28, 0x8c, + 0x4f, 0x48, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x52, 0x4a, 0x88, 0xd7, 0xfc, 0x8e, 0x17, 0x85, + 0xb3, 0x25, 0xd6, 0xbd, 0x4c, 0xdd, 0xdd, 0xed, 0x18, 0x2f, 0x29, 0x42, 0xe6, 0x95, 0xb1, 0x41, + 0x0a, 0x7d, 0x1a, 0xc6, 0xf9, 0x7f, 0xae, 0x01, 0x0b, 0x67, 0x4f, 0x33, 0xda, 0x17, 0xf2, 0x69, + 0x73, 0xc4, 0xa5, 0xd3, 0x82, 0xf8, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x1a, 0xc2, 0x30, 0xde, 0x74, + 0xf7, 0x88, 0x47, 0xc2, 0xb0, 0x1a, 0xf8, 0x9b, 0x64, 0x16, 0xd8, 0xc0, 0x9c, 0xcd, 0xd6, 0x98, + 0xf9, 0x9b, 0x64, 0x69, 0x9a, 0xd2, 0xbc, 0xa1, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x0b, 0x26, 0xe8, + 0x8b, 0xcd, 0x8d, 0x89, 0x8e, 0xf6, 0x22, 0xca, 0xde, 0x55, 0xd8, 0xa8, 0x84, 0x13, 0x44, 0xd0, + 0x4d, 0x18, 0x0b, 0x23, 0x27, 0x88, 0x3a, 0x6d, 0x4e, 0xf4, 0x4c, 0x2f, 0xa2, 0x4c, 0xe1, 0x5a, + 0xd3, 0xaa, 0x60, 0x83, 0x00, 0x7a, 0x0d, 0x4a, 0x4d, 0x77, 0x8b, 0xd4, 0xf7, 0xeb, 0x4d, 0x32, + 0x3b, 0xc6, 0xa8, 0x65, 0x1e, 0x2a, 0x37, 0x24, 0x12, 0xe7, 0x73, 0xd5, 0x5f, 0x1c, 0x57, 0x47, + 0xb7, 0xe1, 0x4c, 0x44, 0x82, 0x96, 0xeb, 0x39, 0xf4, 0x30, 0x10, 0x4f, 0x2b, 0xa6, 0xc8, 0x1c, + 0x67, 0xbb, 0xed, 0xbc, 0x98, 0x8d, 0x33, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x07, 0xb3, + 0x19, 0x10, 0xbf, 0xe9, 0xd6, 0xf7, 0x67, 0x4f, 0x31, 0xca, 0x1f, 0x15, 0x94, 0x67, 0x37, 0x72, + 0xf0, 0x0e, 0xbb, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x13, 0x26, 0xd9, 0x09, 0x54, 0xed, 0x34, 0x9b, + 0xa2, 0xc1, 0x09, 0xd6, 0xe0, 0x07, 0xe4, 0x7d, 0x5c, 0x31, 0xc1, 0x87, 0x07, 
0xf3, 0x10, 0xff, + 0xc3, 0xc9, 0xda, 0x68, 0x93, 0xe9, 0xcc, 0x3a, 0x81, 0x1b, 0xed, 0xd3, 0x73, 0x83, 0xdc, 0x8b, + 0x66, 0x27, 0xbb, 0xca, 0x2b, 0x74, 0x54, 0xa5, 0x58, 0xd3, 0x0b, 0x71, 0x92, 0x20, 0x3d, 0x52, + 0xc3, 0xa8, 0xe1, 0x7a, 0xb3, 0x53, 0xfc, 0x5d, 0x22, 0x4f, 0xa4, 0x1a, 0x2d, 0xc4, 0x1c, 0xc6, + 0xf4, 0x65, 0xf4, 0xc7, 0x4d, 0x7a, 0x73, 0x4d, 0x33, 0xc4, 0x58, 0x5f, 0x26, 0x01, 0x38, 0xc6, + 0xa1, 0xcc, 0x64, 0x14, 0xed, 0xcf, 0x22, 0x86, 0xaa, 0x0e, 0x96, 0x8d, 0x8d, 0x4f, 0x62, 0x5a, + 0x6e, 0x6f, 0xc2, 0x84, 0x3a, 0x08, 0xd9, 0x98, 0xa0, 0x79, 0x18, 0x64, 0xec, 0x93, 0x90, 0xae, + 0x95, 0x68, 0x17, 0x18, 0x6b, 0x85, 0x79, 0x39, 0xeb, 0x82, 0xfb, 0x36, 0x59, 0xda, 0x8f, 0x08, + 0x7f, 0xd3, 0x17, 0xb5, 0x2e, 0x48, 0x00, 0x8e, 0x71, 0xec, 0xff, 0xc7, 0xd9, 0xd0, 0xf8, 0xb4, + 0xed, 0xe3, 0x7e, 0x79, 0x06, 0x46, 0x76, 0xfc, 0x30, 0xa2, 0xd8, 0xac, 0x8d, 0xc1, 0x98, 0xf1, + 0xbc, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc0, 0x78, 0x5d, 0x6f, 0x40, 0x5c, 0x8e, 0xea, 0x18, + 0x31, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x09, 0x46, 0x98, 0x0d, 0x48, 0xdd, 0x6f, 0x0a, 0xae, 0x4d, + 0xde, 0xf0, 0x23, 0x55, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x84, 0x21, 0xda, 0x85, + 0x4a, 0x55, 0x5c, 0x4b, 0x4a, 0x50, 0x74, 0x8d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x5a, 0x41, 0x1b, + 0x65, 0xfa, 0x1e, 0x26, 0xa8, 0x0a, 0xc3, 0x77, 0x1d, 0x37, 0x72, 0xbd, 0x6d, 0xc1, 0x7f, 0x3c, + 0xd5, 0xf5, 0x8e, 0x62, 0x95, 0xee, 0xf0, 0x0a, 0xfc, 0x16, 0x15, 0x7f, 0xb0, 0x24, 0x43, 0x29, + 0x06, 0x1d, 0xcf, 0xa3, 0x14, 0x0b, 0xfd, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, + 0x06, 0xbd, 0x01, 0x20, 0x77, 0x18, 0x69, 0x08, 0xdb, 0x8b, 0x67, 0x7a, 0x13, 0xdd, 0x50, 0x75, + 0x96, 0x26, 0xe8, 0x1d, 0x1d, 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0x62, 0x7c, 0x5a, 0xba, 0x33, 0xe8, + 0xdb, 0xe8, 0x12, 0x77, 0x82, 0x88, 0x34, 0x16, 0x23, 0x31, 0x38, 0x1f, 0xec, 0xef, 0x91, 0xb2, + 0xe1, 0xb6, 0x88, 0xbe, 0x1d, 0x04, 0x11, 0x1c, 0xd3, 0xb3, 0x7f, 0xa1, 0x08, 0xb3, 0x79, 0xdd, + 0xa5, 0x8b, 0x8e, 0xdc, 0x73, 0xa3, 0x65, 0xca, 0x5e, 0x59, 0xe6, 0xa2, 0x5b, 0x11, 0xe5, 0x58, + 0x61, 0xd0, 0xd9, 0x0f, 0xdd, 0x6d, 0xf9, 0xc6, 0x1c, 0x8c, 0x67, 0xbf, 0xc6, 0x4a, 0xb1, 0x80, + 0x52, 0xbc, 0x80, 0x38, 0xa1, 0x30, 0xee, 0xd1, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x69, + 0xd7, 0x40, 0x0f, 0x69, 0x97, 0x31, 0x44, 0x83, 0xc7, 0x3b, 0x44, 0xe8, 0x33, 0x00, 0x5b, 0xae, + 0xe7, 0x86, 0x3b, 0x8c, 0xfa, 0xd0, 0x91, 0xa9, 0x2b, 0xe6, 0x6c, 0x55, 0x51, 0xc1, 0x1a, 0x45, + 0xf4, 0x22, 0x8c, 0xaa, 0x0d, 0x58, 0x29, 0x33, 0x4d, 0xa7, 0x66, 0x39, 0x12, 0x9f, 0x46, 0x65, + 0xac, 0xe3, 0xd9, 0x6f, 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0xfd, 0x8e, 0x6f, 0xa1, + 0xfb, 0xf8, 0xda, 0x5f, 0x2b, 0xc2, 0xa4, 0xd1, 0x58, 0x27, 0xec, 0xe3, 0xcc, 0xba, 0x4a, 0x0f, + 0x70, 0x27, 0x22, 0x62, 0xff, 0xd9, 0xbd, 0xb7, 0x8a, 0x7e, 0xc8, 0xd3, 0x1d, 0xc0, 0xeb, 0xa3, + 0xcf, 0x40, 0xa9, 0xe9, 0x84, 0x4c, 0x72, 0x46, 0xc4, 0xbe, 0xeb, 0x87, 0x58, 0xfc, 0x30, 0x71, + 0xc2, 0x48, 0xbb, 0x35, 0x39, 0xed, 0x98, 0x24, 0xbd, 0x69, 0x28, 0x7f, 0x22, 0xad, 0xc7, 0x54, + 0x27, 0x28, 0x13, 0xb3, 0x8f, 0x39, 0x0c, 0xbd, 0x04, 0x63, 0x01, 0x61, 0xab, 0x62, 0x99, 0x72, + 0x73, 0x6c, 0x99, 0x0d, 0xc6, 0x6c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 0xdf, 0x06, 0x43, 0x5d, + 0xde, 0x06, 0x4f, 0xc1, 0x30, 0xfb, 0xa1, 0x56, 0x80, 0x9a, 0x8d, 0x0a, 0x2f, 0xc6, 0x12, 0x9e, + 0x5c, 0x30, 0x23, 0xfd, 0x2d, 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x69, 0x99, 0x47, 0xf8, 0x29, + 0x27, 0x96, 0x3c, 0x96, 0x30, 0xfb, 0x83, 0x30, 0x51, 0x76, 0x48, 0xcb, 0xf7, 0x56, 0xbc, 0x46, + 0xdb, 
0x77, 0xbd, 0x08, 0xcd, 0xc2, 0x00, 0xbb, 0x44, 0xf8, 0x11, 0x30, 0x40, 0x1b, 0xc2, 0x03, + 0xf4, 0x41, 0x60, 0x6f, 0xc3, 0xe9, 0xb2, 0x7f, 0xd7, 0xbb, 0xeb, 0x04, 0x8d, 0xc5, 0x6a, 0x45, + 0x7b, 0x5f, 0xaf, 0xcb, 0xf7, 0x1d, 0x37, 0xda, 0xca, 0x3c, 0x7a, 0xb5, 0x9a, 0x9c, 0xad, 0x5d, + 0x75, 0x9b, 0x24, 0x47, 0x0a, 0xf2, 0x37, 0x0a, 0x46, 0x4b, 0x31, 0xbe, 0xd2, 0x6a, 0x59, 0xb9, + 0x5a, 0xad, 0xd7, 0x61, 0x64, 0xcb, 0x25, 0xcd, 0x06, 0x26, 0x5b, 0x62, 0x25, 0x3e, 0x99, 0x6f, + 0x87, 0xb2, 0x4a, 0x31, 0xa5, 0xd4, 0x8b, 0xbf, 0x0e, 0x57, 0x45, 0x65, 0xac, 0xc8, 0xa0, 0x5d, + 0x98, 0x92, 0x0f, 0x06, 0x09, 0x15, 0xeb, 0xf2, 0xa9, 0x6e, 0xaf, 0x10, 0x93, 0xf8, 0xa9, 0xfb, + 0x07, 0xf3, 0x53, 0x38, 0x41, 0x06, 0xa7, 0x08, 0xd3, 0xe7, 0x60, 0x8b, 0x9e, 0xc0, 0x03, 0x6c, + 0xf8, 0xd9, 0x73, 0x90, 0xbd, 0x6c, 0x59, 0xa9, 0xfd, 0x63, 0x16, 0x3c, 0x92, 0x1a, 0x19, 0xf1, + 0xc2, 0x3f, 0xe6, 0x59, 0x48, 0xbe, 0xb8, 0x0b, 0xbd, 0x5f, 0xdc, 0xf6, 0xdf, 0xb7, 0xe0, 0xd4, + 0x4a, 0xab, 0x1d, 0xed, 0x97, 0x5d, 0x53, 0x05, 0xf5, 0x11, 0x18, 0x6a, 0x91, 0x86, 0xdb, 0x69, + 0x89, 0x99, 0x9b, 0x97, 0xa7, 0xd4, 0x1a, 0x2b, 0x3d, 0x3c, 0x98, 0x1f, 0xaf, 0x45, 0x7e, 0xe0, + 0x6c, 0x13, 0x5e, 0x80, 0x05, 0x3a, 0x3b, 0xeb, 0xdd, 0xb7, 0xc9, 0x0d, 0xb7, 0xe5, 0x4a, 0xbb, + 0xa2, 0xae, 0x32, 0xbb, 0x05, 0x39, 0xa0, 0x0b, 0xaf, 0x77, 0x1c, 0x2f, 0x72, 0xa3, 0x7d, 0xa1, + 0x3d, 0x92, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0xaa, 0x05, 0x93, 0x72, 0xdd, 0x2f, 0x36, 0x1a, 0x01, + 0x09, 0x43, 0x34, 0x07, 0x05, 0xb7, 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x4a, 0x15, 0x17, 0xdc, + 0xb6, 0x64, 0xcb, 0xd8, 0x41, 0x58, 0x34, 0x15, 0x69, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x12, + 0x8c, 0x78, 0x7e, 0x83, 0xdb, 0x76, 0xf1, 0x2b, 0x8d, 0x2d, 0xb0, 0x75, 0x51, 0x86, 0x15, 0x14, + 0x55, 0xa1, 0xc4, 0xcd, 0x9e, 0xe2, 0x45, 0xdb, 0x97, 0xf1, 0x14, 0xfb, 0xb2, 0x0d, 0x59, 0x13, + 0xc7, 0x44, 0xec, 0x5f, 0xb1, 0x60, 0x4c, 0x7e, 0x59, 0x9f, 0x3c, 0x27, 0xdd, 0x5a, 0x31, 0xbf, + 0x19, 0x6f, 0x2d, 0xca, 0x33, 0x32, 0x88, 0xc1, 0x2a, 0x16, 0x8f, 0xc4, 0x2a, 0x5e, 0x81, 0x51, + 0xa7, 0xdd, 0xae, 0x9a, 0x7c, 0x26, 0x5b, 0x4a, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xec, 0x1f, 0x2d, + 0xc0, 0x84, 0xfc, 0x82, 0x5a, 0x67, 0x33, 0x24, 0x11, 0xda, 0x80, 0x92, 0xc3, 0x67, 0x89, 0xc8, + 0x45, 0xfe, 0x44, 0xb6, 0x1c, 0xc1, 0x98, 0xd2, 0xf8, 0xc2, 0x5f, 0x94, 0xb5, 0x71, 0x4c, 0x08, + 0x35, 0x61, 0xda, 0xf3, 0x23, 0x76, 0xf8, 0x2b, 0x78, 0x37, 0xd5, 0x4e, 0x92, 0xfa, 0x59, 0x41, + 0x7d, 0x7a, 0x3d, 0x49, 0x05, 0xa7, 0x09, 0xa3, 0x15, 0x29, 0x9b, 0x29, 0xe6, 0x0b, 0x03, 0xf4, + 0x89, 0xcb, 0x16, 0xcd, 0xd8, 0xbf, 0x6c, 0x41, 0x49, 0xa2, 0x9d, 0x84, 0x16, 0x6f, 0x0d, 0x86, + 0x43, 0x36, 0x09, 0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x15, 0xdf, 0x69, 0xfc, 0x7f, 0x88, + 0x25, 0x0d, 0x26, 0x9a, 0x57, 0xdd, 0x7f, 0x97, 0x88, 0xe6, 0x55, 0x7f, 0x72, 0x2e, 0xa5, 0x3f, + 0x64, 0x7d, 0xd6, 0x64, 0x5d, 0x94, 0xf5, 0x6a, 0x07, 0x64, 0xcb, 0xbd, 0x97, 0x64, 0xbd, 0xaa, + 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x03, 0xc6, 0xea, 0x52, 0x26, 0x1b, 0xef, 0xf0, 0x8b, 0x5d, 0xf5, + 0x03, 0x4a, 0x95, 0xc4, 0x65, 0x21, 0xcb, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xcd, 0x08, 0x8a, 0xbd, + 0xcc, 0x08, 0x62, 0xba, 0xf9, 0x4a, 0xf5, 0x1f, 0xb7, 0x60, 0x88, 0xcb, 0xe2, 0xfa, 0x13, 0x85, + 0x6a, 0x9a, 0xb5, 0x78, 0xec, 0x6e, 0xd3, 0x42, 0xa1, 0x29, 0x43, 0x6b, 0x50, 0x62, 0x3f, 0x98, + 0x2c, 0xb1, 0x98, 0x6f, 0x75, 0xcf, 0x5b, 0xd5, 0x3b, 0x78, 0x5b, 0x56, 0xc3, 0x31, 0x05, 0xfb, + 0x87, 0x8b, 0xf4, 0x74, 0x8b, 0x51, 0x8d, 0x4b, 0xdf, 0x7a, 0x78, 0x97, 0x7e, 0xe1, 0x61, 0x5d, + 0xfa, 0xdb, 0x30, 0x59, 0xd7, 
0xf4, 0x70, 0xf1, 0x4c, 0x5e, 0xea, 0xba, 0x48, 0x34, 0x95, 0x1d, + 0x97, 0xb2, 0x2c, 0x9b, 0x44, 0x70, 0x92, 0x2a, 0xfa, 0x36, 0x18, 0xe3, 0xf3, 0x2c, 0x5a, 0xe1, + 0x96, 0x18, 0x1f, 0xc8, 0x5f, 0x2f, 0x7a, 0x13, 0x5c, 0x2a, 0xa7, 0x55, 0xc7, 0x06, 0x31, 0xfb, + 0x4f, 0x2c, 0x40, 0x2b, 0xed, 0x1d, 0xd2, 0x22, 0x81, 0xd3, 0x8c, 0xc5, 0xe9, 0xdf, 0x6f, 0xc1, + 0x2c, 0x49, 0x15, 0x2f, 0xfb, 0xad, 0x96, 0x78, 0xb4, 0xe4, 0xbc, 0xab, 0x57, 0x72, 0xea, 0x28, + 0xb7, 0x84, 0xd9, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x83, 0x19, 0x7e, 0x4b, 0x2a, 0x80, 0x66, + 0x7b, 0xfd, 0xa8, 0x20, 0x3c, 0xb3, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xec, 0xef, 0x1a, 0x83, 0xdc, + 0x5e, 0xbc, 0xa7, 0x47, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e, 0xe1, 0x3d, 0x3d, 0xc2, 0x7b, 0x7a, + 0x84, 0x6f, 0x7a, 0x3d, 0xc2, 0x1f, 0x59, 0x30, 0x93, 0xbe, 0x06, 0x4e, 0x82, 0x31, 0xef, 0xc0, + 0x4c, 0xfa, 0xae, 0xeb, 0x6a, 0x67, 0x97, 0xee, 0x67, 0x7c, 0xef, 0x65, 0x7c, 0x03, 0xce, 0xa2, + 0x6f, 0xff, 0x9a, 0x05, 0xa7, 0x15, 0xb2, 0xf1, 0xd2, 0xff, 0x1c, 0xcc, 0xf0, 0xf3, 0x65, 0xb9, + 0xe9, 0xb8, 0xad, 0x0d, 0xd2, 0x6a, 0x37, 0x9d, 0x48, 0x9a, 0x19, 0x5c, 0xc9, 0xdc, 0xaa, 0x09, + 0x13, 0x5d, 0xa3, 0xe2, 0xd2, 0x23, 0xb4, 0x5f, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0x61, 0x94, 0x5a, + 0xe8, 0x69, 0x26, 0xfc, 0x0b, 0x23, 0x30, 0xb8, 0xb2, 0x47, 0xbc, 0xe8, 0x04, 0x26, 0xaa, 0x0e, + 0x13, 0xae, 0xb7, 0xe7, 0x37, 0xf7, 0x48, 0x83, 0xc3, 0x8f, 0xf2, 0xd0, 0x3f, 0x23, 0x48, 0x4f, + 0x54, 0x0c, 0x12, 0x38, 0x41, 0xf2, 0x61, 0x08, 0xdb, 0xaf, 0xc2, 0x10, 0xbf, 0xe3, 0x84, 0xa4, + 0x3d, 0xf3, 0x4a, 0x63, 0x83, 0x28, 0x6e, 0xee, 0x58, 0x11, 0xc0, 0xef, 0x50, 0x51, 0x1d, 0xbd, + 0x09, 0x13, 0x5b, 0x6e, 0x10, 0x46, 0x1b, 0x6e, 0x8b, 0x84, 0x91, 0xd3, 0x6a, 0x3f, 0x80, 0x70, + 0x5d, 0x8d, 0xc3, 0xaa, 0x41, 0x09, 0x27, 0x28, 0xa3, 0x6d, 0x18, 0x6f, 0x3a, 0x7a, 0x53, 0xc3, + 0x47, 0x6e, 0x4a, 0x5d, 0x9e, 0x37, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0x4f, 0x9b, 0x3a, 0x93, 0x0f, + 0x8f, 0x30, 0xa9, 0x89, 0x3a, 0x6d, 0xb8, 0x60, 0x98, 0xc3, 0x28, 0x1f, 0xc8, 0xec, 0x87, 0x4b, + 0x26, 0x1f, 0xa8, 0x59, 0x09, 0x7f, 0x16, 0x4a, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0xee, 0xdf, 0xcb, + 0xfd, 0xf5, 0x75, 0xcd, 0xad, 0x07, 0xbe, 0xa9, 0xd6, 0x58, 0x91, 0x94, 0x70, 0x4c, 0x14, 0x2d, + 0xc3, 0x50, 0x48, 0x02, 0x97, 0x84, 0xe2, 0x26, 0xee, 0x32, 0x8d, 0x0c, 0x8d, 0xbb, 0xde, 0xf0, + 0xdf, 0x58, 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0x12, 0x5f, 0x76, 0x57, 0x6a, 0xcb, 0x6b, 0x91, 0x95, + 0x62, 0x01, 0x45, 0xaf, 0xc1, 0x70, 0x40, 0x9a, 0x4c, 0x6f, 0x36, 0xde, 0xff, 0x22, 0xe7, 0x6a, + 0x38, 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x3a, 0xa0, 0x80, 0x50, 0x3e, 0xd2, 0xf5, 0xb6, 0x95, 0x55, + 0xad, 0xb8, 0x87, 0xd4, 0xb9, 0x85, 0x63, 0x0c, 0xe9, 0x05, 0x85, 0x33, 0xaa, 0xa1, 0xab, 0x30, + 0xad, 0x4a, 0x2b, 0x5e, 0x18, 0x39, 0xf4, 0xfc, 0x9f, 0x64, 0xb4, 0x94, 0x18, 0x07, 0x27, 0x11, + 0x70, 0xba, 0x8e, 0xfd, 0x25, 0x0b, 0xf8, 0x38, 0x9f, 0x80, 0xf0, 0xe2, 0x55, 0x53, 0x78, 0x71, + 0x36, 0x77, 0xe6, 0x72, 0x04, 0x17, 0x5f, 0xb2, 0x60, 0x54, 0x9b, 0xd9, 0x78, 0xcd, 0x5a, 0x5d, + 0xd6, 0x6c, 0x07, 0xa6, 0xe8, 0x4a, 0xbf, 0xb9, 0x19, 0x92, 0x60, 0x8f, 0x34, 0xd8, 0xc2, 0x2c, + 0x3c, 0xd8, 0xc2, 0x54, 0x16, 0x7c, 0x37, 0x12, 0x04, 0x71, 0xaa, 0x09, 0xfb, 0xb3, 0xb2, 0xab, + 0xca, 0xe0, 0xb1, 0xae, 0xe6, 0x3c, 0x61, 0xf0, 0xa8, 0x66, 0x15, 0xc7, 0x38, 0x74, 0xab, 0xed, + 0xf8, 0x61, 0x94, 0x34, 0x78, 0xbc, 0xe6, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x79, 0x80, 0x95, 0x7b, + 0xa4, 0xce, 0x57, 0xac, 0xfe, 0xb6, 0xb2, 0xf2, 0xdf, 0x56, 0xf6, 0x6f, 0x59, 0x30, 0xb1, 0xba, + 0x6c, 0xdc, 0x73, 0x0b, 0x00, 0xfc, 0x41, 0x78, 0xe7, 
0xce, 0xba, 0xb4, 0x16, 0xe0, 0x0a, 0x5f, + 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x16, 0x8a, 0xcd, 0x8e, 0x27, 0xa4, 0xab, 0xc3, 0x94, 0x7b, 0xb8, + 0xd1, 0xf1, 0x30, 0x2d, 0xd3, 0x3c, 0x2e, 0x8a, 0x7d, 0x7b, 0x5c, 0xf4, 0x8c, 0x7c, 0x80, 0xe6, + 0x61, 0xf0, 0xee, 0x5d, 0xb7, 0xc1, 0xfd, 0x4b, 0x85, 0x25, 0xc3, 0x9d, 0x3b, 0x95, 0x72, 0x88, + 0x79, 0xb9, 0xfd, 0xc5, 0x22, 0xcc, 0xad, 0x36, 0xc9, 0xbd, 0x77, 0xe8, 0x63, 0xdb, 0xaf, 0xbf, + 0xc8, 0xd1, 0xe4, 0x54, 0x47, 0xf5, 0x09, 0xea, 0x3d, 0x1e, 0x5b, 0x30, 0xcc, 0xed, 0xfd, 0xa4, + 0xc7, 0xed, 0x2b, 0x59, 0xad, 0xe7, 0x0f, 0xc8, 0x02, 0xb7, 0x1b, 0x14, 0x0e, 0x83, 0xea, 0xc2, + 0x14, 0xa5, 0x58, 0x12, 0x9f, 0x7b, 0x19, 0xc6, 0x74, 0xcc, 0x23, 0x79, 0xe7, 0xfd, 0x95, 0x22, + 0x4c, 0xd1, 0x1e, 0x3c, 0xd4, 0x89, 0xb8, 0x95, 0x9e, 0x88, 0xe3, 0xf6, 0xd0, 0xea, 0x3d, 0x1b, + 0x6f, 0x24, 0x67, 0xe3, 0x4a, 0xde, 0x6c, 0x9c, 0xf4, 0x1c, 0x7c, 0xa7, 0x05, 0x33, 0xab, 0x4d, + 0xbf, 0xbe, 0x9b, 0xf0, 0xa2, 0x7a, 0x11, 0x46, 0xe9, 0x71, 0x1c, 0x1a, 0x0e, 0xfe, 0x46, 0xc8, + 0x07, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xd6, 0xad, 0x4a, 0x39, 0x2b, 0x52, 0x84, 0x00, 0x61, + 0x1d, 0xcf, 0xfe, 0x0d, 0x0b, 0xce, 0x5d, 0x5d, 0x5e, 0x89, 0x97, 0x62, 0x2a, 0x58, 0xc5, 0x45, + 0x18, 0x6a, 0x37, 0xb4, 0xae, 0xc4, 0xd2, 0xe7, 0x32, 0xeb, 0x85, 0x80, 0xbe, 0x5b, 0x02, 0xb1, + 0xfc, 0xb4, 0x05, 0x33, 0x57, 0xdd, 0x88, 0xde, 0xae, 0xc9, 0xb0, 0x09, 0xf4, 0x7a, 0x0d, 0xdd, + 0xc8, 0x0f, 0xf6, 0x93, 0x61, 0x13, 0xb0, 0x82, 0x60, 0x0d, 0x8b, 0xb7, 0xbc, 0xe7, 0x32, 0x4b, + 0xf3, 0x82, 0xa9, 0x87, 0xc3, 0xa2, 0x1c, 0x2b, 0x0c, 0xfa, 0x61, 0x0d, 0x37, 0x60, 0x22, 0xcc, + 0x7d, 0x71, 0xc2, 0xaa, 0x0f, 0x2b, 0x4b, 0x00, 0x8e, 0x71, 0xe8, 0x6b, 0x6e, 0xfe, 0x6a, 0xb3, + 0x13, 0x46, 0x24, 0xd8, 0x0a, 0x73, 0x4e, 0xc7, 0xe7, 0xa1, 0x44, 0xa4, 0xc2, 0x40, 0xf4, 0x5a, + 0x71, 0x8c, 0x4a, 0x93, 0xc0, 0xa3, 0x37, 0x28, 0xbc, 0x3e, 0x7c, 0x32, 0x8f, 0xe6, 0x54, 0xb7, + 0x0a, 0x88, 0xe8, 0x6d, 0xe9, 0xe1, 0x2c, 0x98, 0x5f, 0xfc, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, + 0x3f, 0x66, 0xc1, 0x69, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0x7f, 0xb6, 0x00, 0xe3, 0xd7, 0x36, + 0x36, 0xaa, 0x57, 0x49, 0x24, 0xae, 0xed, 0xde, 0x66, 0x00, 0x58, 0xd3, 0x66, 0x76, 0x7b, 0xcc, + 0x75, 0x22, 0xb7, 0xb9, 0xc0, 0xa3, 0x22, 0x2d, 0x54, 0xbc, 0xe8, 0x66, 0x50, 0x8b, 0x02, 0xd7, + 0xdb, 0xce, 0xd4, 0x7f, 0x4a, 0xe6, 0xa2, 0x98, 0xc7, 0x5c, 0xa0, 0xe7, 0x61, 0x88, 0x85, 0x65, + 0x92, 0x93, 0xf0, 0xa8, 0x7a, 0x0b, 0xb1, 0xd2, 0xc3, 0x83, 0xf9, 0xd2, 0x2d, 0x5c, 0xe1, 0x7f, + 0xb0, 0x40, 0x45, 0xb7, 0x60, 0x74, 0x27, 0x8a, 0xda, 0xd7, 0x88, 0xd3, 0xa0, 0x4f, 0x77, 0x7e, + 0x1c, 0x9e, 0xcf, 0x3a, 0x0e, 0xe9, 0x20, 0x70, 0xb4, 0xf8, 0x04, 0x89, 0xcb, 0x42, 0xac, 0xd3, + 0xb1, 0x6b, 0x00, 0x31, 0xec, 0x98, 0x14, 0x39, 0xf6, 0x1f, 0x58, 0x30, 0xcc, 0x23, 0x64, 0x04, + 0xe8, 0xa3, 0x30, 0x40, 0xee, 0x91, 0xba, 0xe0, 0x78, 0x33, 0x3b, 0x1c, 0x73, 0x5a, 0x5c, 0x20, + 0x4d, 0xff, 0x63, 0x56, 0x0b, 0x5d, 0x83, 0x61, 0xda, 0xdb, 0xab, 0x2a, 0x5c, 0xc8, 0xe3, 0x79, + 0x5f, 0xac, 0xa6, 0x9d, 0x33, 0x67, 0xa2, 0x08, 0xcb, 0xea, 0x4c, 0x7b, 0x5e, 0x6f, 0xd7, 0xe8, + 0x89, 0x1d, 0x75, 0x63, 0x2c, 0x36, 0x96, 0xab, 0x1c, 0x49, 0x50, 0xe3, 0xda, 0x73, 0x59, 0x88, + 0x63, 0x22, 0xf6, 0x06, 0x94, 0xe8, 0xa4, 0x2e, 0x36, 0x5d, 0xa7, 0xbb, 0x41, 0xc0, 0xd3, 0x50, + 0x92, 0xea, 0xfe, 0x50, 0x78, 0xc6, 0x33, 0xaa, 0xd2, 0x1a, 0x20, 0xc4, 0x31, 0xdc, 0xde, 0x82, + 0x53, 0xcc, 0x78, 0xd3, 0x89, 0x76, 0x8c, 0x3d, 0xd6, 0x7b, 0x31, 0x3f, 0x23, 0x1e, 0x90, 0x7c, + 0x66, 0x66, 0x35, 0xe7, 0xd3, 0x31, 0x49, 0x31, 0x7e, 0x4c, 0xda, 0x5f, 0x1b, 
0x80, 0x47, 0x2b, + 0xb5, 0xfc, 0xe0, 0x29, 0x2f, 0xc1, 0x18, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0x25, + 0x89, 0xde, 0xd0, 0x60, 0xd8, 0xc0, 0x44, 0xe7, 0xa0, 0xe8, 0xbe, 0xe5, 0x25, 0x5d, 0xb3, 0x2a, + 0xaf, 0xaf, 0x63, 0x5a, 0x4e, 0xc1, 0x94, 0xc5, 0xe5, 0x77, 0x87, 0x02, 0x2b, 0x36, 0xf7, 0x55, + 0x98, 0x70, 0xc3, 0x7a, 0xe8, 0x56, 0x3c, 0x7a, 0xce, 0x68, 0x27, 0x95, 0x12, 0x6e, 0xd0, 0x4e, + 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0x17, 0xd9, 0x60, 0xdf, 0x6c, 0x72, 0x4f, 0x57, 0x71, 0xfa, 0x02, + 0x68, 0xb3, 0xaf, 0x0b, 0x99, 0x4a, 0x41, 0xbc, 0x00, 0xf8, 0x07, 0x87, 0x58, 0xc2, 0xe8, 0xcb, + 0xb1, 0xbe, 0xe3, 0xb4, 0x17, 0x3b, 0xd1, 0x4e, 0xd9, 0x0d, 0xeb, 0xfe, 0x1e, 0x09, 0xf6, 0xd9, + 0xa3, 0x7f, 0x24, 0x7e, 0x39, 0x2a, 0xc0, 0xf2, 0xb5, 0xc5, 0x2a, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, + 0x11, 0x26, 0x65, 0x61, 0x8d, 0x84, 0xec, 0x0a, 0x1b, 0x65, 0x64, 0x94, 0xb3, 0x94, 0x28, 0x56, + 0x44, 0x92, 0xf8, 0x26, 0x27, 0x0d, 0xc7, 0xc1, 0x49, 0x7f, 0x04, 0xc6, 0x5d, 0xcf, 0x8d, 0x5c, + 0x27, 0xf2, 0xb9, 0x3e, 0x8c, 0xbf, 0xef, 0x99, 0xa0, 0xbf, 0xa2, 0x03, 0xb0, 0x89, 0x67, 0xff, + 0xb7, 0x01, 0x98, 0x66, 0xd3, 0xf6, 0xde, 0x0a, 0xfb, 0x66, 0x5a, 0x61, 0xb7, 0xd2, 0x2b, 0xec, + 0x38, 0x9e, 0x08, 0x0f, 0xbc, 0xcc, 0xde, 0x84, 0x92, 0xf2, 0x0f, 0x93, 0x0e, 0xa2, 0x56, 0x8e, + 0x83, 0x68, 0x6f, 0xee, 0x43, 0x9a, 0xd8, 0x15, 0x33, 0x4d, 0xec, 0xfe, 0x96, 0x05, 0xb1, 0x82, + 0x07, 0x5d, 0x83, 0x52, 0xdb, 0x67, 0x96, 0xa3, 0x81, 0x34, 0xc7, 0x7e, 0x34, 0xf3, 0xa2, 0xe2, + 0x97, 0x22, 0xff, 0xf8, 0xaa, 0xac, 0x81, 0xe3, 0xca, 0x68, 0x09, 0x86, 0xdb, 0x01, 0xa9, 0x45, + 0x2c, 0x86, 0x4a, 0x4f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0x3f, 0x67, 0x01, 0x70, + 0x2b, 0x36, 0xc7, 0xdb, 0x26, 0x27, 0x20, 0xb5, 0x2e, 0xc3, 0x40, 0xd8, 0x26, 0xf5, 0x6e, 0x36, + 0xbd, 0x71, 0x7f, 0x6a, 0x6d, 0x52, 0x8f, 0x07, 0x9c, 0xfe, 0xc3, 0xac, 0xb6, 0xfd, 0xdd, 0x00, + 0x13, 0x31, 0x5a, 0x25, 0x22, 0x2d, 0xf4, 0xac, 0x11, 0x53, 0xe1, 0x6c, 0x22, 0xa6, 0x42, 0x89, + 0x61, 0x6b, 0x02, 0xd2, 0x37, 0xa1, 0xd8, 0x72, 0xee, 0x09, 0x09, 0xd8, 0xd3, 0xdd, 0xbb, 0x41, + 0xe9, 0x2f, 0xac, 0x39, 0xf7, 0xf8, 0x23, 0xf1, 0x69, 0xb9, 0x40, 0xd6, 0x9c, 0x7b, 0x87, 0xdc, + 0x72, 0x97, 0x1d, 0x52, 0x37, 0xdc, 0x30, 0xfa, 0xfc, 0x7f, 0x8d, 0xff, 0xb3, 0x65, 0x47, 0x1b, + 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0xd0, 0xea, 0xab, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x7d, 0xb4, + 0xe5, 0x7a, 0xe8, 0x6d, 0x18, 0x16, 0xf6, 0x93, 0x22, 0x86, 0xd1, 0xe5, 0x3e, 0xda, 0x13, 0xe6, + 0x97, 0xbc, 0xcd, 0xcb, 0xf2, 0x11, 0x2c, 0x4a, 0x7b, 0xb6, 0x2b, 0x1b, 0x44, 0x7f, 0xdd, 0x82, + 0x09, 0xf1, 0x1b, 0x93, 0xb7, 0x3a, 0x24, 0x8c, 0x04, 0xef, 0xf9, 0xe1, 0xfe, 0xfb, 0x20, 0x2a, + 0xf2, 0xae, 0x7c, 0x58, 0x1e, 0xb3, 0x26, 0xb0, 0x67, 0x8f, 0x12, 0xbd, 0x40, 0xff, 0xd0, 0x82, + 0x53, 0x2d, 0xe7, 0x1e, 0x6f, 0x91, 0x97, 0x61, 0x27, 0x72, 0x7d, 0x61, 0x87, 0xf0, 0xd1, 0xfe, + 0xa6, 0x3f, 0x55, 0x9d, 0x77, 0x52, 0x2a, 0x4b, 0x4f, 0x65, 0xa1, 0xf4, 0xec, 0x6a, 0x66, 0xbf, + 0xe6, 0xb6, 0x60, 0x44, 0xae, 0xb7, 0x0c, 0x51, 0x43, 0x59, 0x67, 0xac, 0x8f, 0x6c, 0xbe, 0xaa, + 0xc7, 0x2a, 0xa0, 0xed, 0x88, 0xb5, 0xf6, 0x50, 0xdb, 0x79, 0x13, 0xc6, 0xf4, 0x35, 0xf6, 0x50, + 0xdb, 0x7a, 0x0b, 0x66, 0x32, 0xd6, 0xd2, 0x43, 0x6d, 0xf2, 0x2e, 0x9c, 0xcd, 0x5d, 0x1f, 0x0f, + 0xb3, 0x61, 0xfb, 0x67, 0x2d, 0xfd, 0x1c, 0x3c, 0x01, 0xd5, 0xc1, 0xb2, 0xa9, 0x3a, 0x38, 0xdf, + 0x7d, 0xe7, 0xe4, 0xe8, 0x0f, 0xde, 0xd0, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x1a, 0x0c, 0x35, 0x69, + 0x89, 0xb4, 0xc2, 0xb5, 0x7b, 0xef, 0xc8, 0x98, 0x97, 0x62, 0xe5, 0x21, 0x16, 0x14, 0xec, 0x5f, + 0xb4, 
0x60, 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x6c, 0x2e, 0x69, 0x11, 0x5e, 0x79, 0x01, + 0x3b, 0x77, 0x57, 0xee, 0x45, 0xc4, 0x0b, 0xd9, 0x53, 0x31, 0x73, 0x60, 0x7e, 0xd2, 0x82, 0x99, + 0x1b, 0xbe, 0xd3, 0x58, 0x72, 0x9a, 0x8e, 0x57, 0x27, 0x41, 0xc5, 0xdb, 0x3e, 0x92, 0x09, 0x79, + 0xa1, 0xa7, 0x09, 0xf9, 0xb2, 0xb4, 0xc0, 0x1a, 0xc8, 0x9f, 0x3f, 0xca, 0x48, 0x26, 0xa3, 0xcc, + 0x18, 0xb6, 0xc2, 0x3b, 0x80, 0xf4, 0x5e, 0x0a, 0x87, 0x1e, 0x0c, 0xc3, 0x2e, 0xef, 0xaf, 0x98, + 0xc4, 0x27, 0xb3, 0x19, 0xbc, 0xd4, 0xe7, 0x69, 0xae, 0x2a, 0xbc, 0x00, 0x4b, 0x42, 0xf6, 0x4b, + 0x90, 0x19, 0x15, 0xa0, 0xb7, 0xf0, 0xc1, 0xfe, 0x24, 0x4c, 0xb3, 0x9a, 0x47, 0x7c, 0x18, 0xdb, + 0x09, 0xd9, 0x66, 0x46, 0xbc, 0x40, 0xfb, 0x0b, 0x16, 0x4c, 0xae, 0x27, 0xc2, 0xa8, 0x5d, 0x64, + 0xda, 0xd0, 0x0c, 0x91, 0x7a, 0x8d, 0x95, 0x62, 0x01, 0x3d, 0x76, 0x49, 0xd6, 0x5f, 0x58, 0x10, + 0x07, 0xea, 0x38, 0x01, 0xf6, 0x6d, 0xd9, 0x60, 0xdf, 0x32, 0x25, 0x2c, 0xaa, 0x3b, 0x79, 0xdc, + 0x1b, 0xba, 0xae, 0x42, 0x58, 0x75, 0x11, 0xae, 0xc4, 0x64, 0xf8, 0x52, 0x9c, 0x30, 0xe3, 0x5c, + 0xc9, 0xa0, 0x56, 0xf6, 0x6f, 0x17, 0x00, 0x29, 0xdc, 0xbe, 0x43, 0x6c, 0xa5, 0x6b, 0x1c, 0x4f, + 0x88, 0xad, 0x3d, 0x40, 0x4c, 0x9f, 0x1f, 0x38, 0x5e, 0xc8, 0xc9, 0xba, 0x42, 0x76, 0x77, 0x34, + 0x63, 0x81, 0x39, 0xd1, 0x24, 0xba, 0x91, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xec, 0x34, 0x06, 0xfb, + 0xb5, 0xd3, 0x18, 0xea, 0xe1, 0xb4, 0xf7, 0x33, 0x16, 0x8c, 0xab, 0x61, 0x7a, 0x97, 0x98, 0xd4, + 0xab, 0xfe, 0xe4, 0x1c, 0xa0, 0x55, 0xad, 0xcb, 0xec, 0x62, 0xf9, 0x56, 0xe6, 0x7c, 0xe9, 0x34, + 0xdd, 0xb7, 0x89, 0x0a, 0x70, 0x38, 0x2f, 0x9c, 0x29, 0x45, 0xe9, 0xe1, 0xc1, 0xfc, 0xb8, 0xfa, + 0xc7, 0x03, 0x2a, 0xc7, 0x55, 0xe8, 0x91, 0x3c, 0x99, 0x58, 0x8a, 0xe8, 0x45, 0x18, 0x6c, 0xef, + 0x38, 0x21, 0x49, 0xb8, 0x1e, 0x0d, 0x56, 0x69, 0xe1, 0xe1, 0xc1, 0xfc, 0x84, 0xaa, 0xc0, 0x4a, + 0x30, 0xc7, 0xee, 0x3f, 0x70, 0x59, 0x7a, 0x71, 0xf6, 0x0c, 0x5c, 0xf6, 0x27, 0x16, 0x0c, 0xac, + 0xfb, 0x8d, 0x93, 0x38, 0x02, 0x5e, 0x35, 0x8e, 0x80, 0xc7, 0xf2, 0x62, 0xdd, 0xe7, 0xee, 0xfe, + 0xd5, 0xc4, 0xee, 0x3f, 0x9f, 0x4b, 0xa1, 0xfb, 0xc6, 0x6f, 0xc1, 0x28, 0x8b, 0xa0, 0x2f, 0xdc, + 0xac, 0x9e, 0x37, 0x36, 0xfc, 0x7c, 0x62, 0xc3, 0x4f, 0x6a, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0x86, + 0x85, 0xdf, 0x4e, 0xd2, 0x87, 0x55, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xbc, 0x08, 0x46, 0xc4, 0x7e, + 0xf4, 0xcb, 0x16, 0x2c, 0x04, 0xdc, 0x9e, 0xb7, 0x51, 0xee, 0x04, 0xae, 0xb7, 0x5d, 0xab, 0xef, + 0x90, 0x46, 0xa7, 0xe9, 0x7a, 0xdb, 0x95, 0x6d, 0xcf, 0x57, 0xc5, 0x2b, 0xf7, 0x48, 0xbd, 0xc3, + 0x94, 0x60, 0x3d, 0xd2, 0x03, 0x28, 0xbb, 0xf8, 0xe7, 0xee, 0x1f, 0xcc, 0x2f, 0xe0, 0x23, 0xd1, + 0xc6, 0x47, 0xec, 0x0b, 0xfa, 0x0d, 0x0b, 0x2e, 0xf3, 0x40, 0xf6, 0xfd, 0xf7, 0xbf, 0xcb, 0x6b, + 0xb9, 0x2a, 0x49, 0xc5, 0x44, 0x36, 0x48, 0xd0, 0x5a, 0xfa, 0x88, 0x18, 0xd0, 0xcb, 0xd5, 0xa3, + 0xb5, 0x85, 0x8f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x5c, 0x04, 0xb8, 0x12, 0x77, 0xc0, 0x8b, + 0xc6, 0x92, 0x78, 0x3c, 0xb1, 0x24, 0xa6, 0x0d, 0xe4, 0xe3, 0x39, 0xfe, 0x43, 0x98, 0xa6, 0x87, + 0xf3, 0x35, 0xe2, 0x04, 0xd1, 0x26, 0x71, 0xb8, 0xf9, 0x55, 0xf1, 0xc8, 0xa7, 0xbf, 0x12, 0xcf, + 0xdd, 0x48, 0x12, 0xc3, 0x69, 0xfa, 0xdf, 0x4c, 0x77, 0x8e, 0x07, 0x53, 0xa9, 0x18, 0x65, 0x9f, + 0x82, 0x92, 0x72, 0x3a, 0x11, 0x87, 0x4e, 0xf7, 0x50, 0x7f, 0x49, 0x0a, 0x5c, 0x84, 0x16, 0x3b, + 0x3c, 0xc5, 0xe4, 0xec, 0x7f, 0x54, 0x30, 0x1a, 0xe4, 0x93, 0xb8, 0x0e, 0x23, 0x4e, 0x18, 0xba, + 0xdb, 0x1e, 0x69, 0x88, 0x1d, 0xfb, 0xfe, 0xbc, 0x1d, 0x6b, 0x34, 0xc3, 0x1c, 0x7f, 0x16, 0x45, + 0x4d, 0xac, 0x68, 0xa0, 0x6b, 
0xdc, 0xc8, 0x6d, 0x4f, 0xbe, 0xf7, 0xfa, 0xa3, 0x06, 0xd2, 0x0c, + 0x6e, 0x8f, 0x60, 0x51, 0x1f, 0x7d, 0x9a, 0x5b, 0x21, 0x5e, 0xf7, 0xfc, 0xbb, 0xde, 0x55, 0xdf, + 0x97, 0x41, 0x24, 0xfa, 0x23, 0x38, 0x2d, 0x6d, 0x0f, 0x55, 0x75, 0x6c, 0x52, 0xeb, 0x2f, 0xe8, + 0xe7, 0xe7, 0x60, 0x86, 0x92, 0x36, 0x7d, 0xbc, 0x43, 0x44, 0x60, 0x52, 0x44, 0x4f, 0x93, 0x65, + 0x62, 0xec, 0x32, 0x9f, 0x72, 0x66, 0xed, 0x58, 0x8e, 0x7c, 0xdd, 0x24, 0x81, 0x93, 0x34, 0xed, + 0x9f, 0xb2, 0x80, 0xf9, 0xbb, 0x9e, 0x00, 0x3f, 0xf2, 0x31, 0x93, 0x1f, 0x99, 0xcd, 0x1b, 0xe4, + 0x1c, 0x56, 0xe4, 0x05, 0xbe, 0xb2, 0xaa, 0x81, 0x7f, 0x6f, 0x5f, 0x98, 0x8e, 0xf4, 0x7e, 0x7f, + 0xd8, 0xff, 0xd7, 0xe2, 0x87, 0x98, 0x72, 0x09, 0x41, 0xdf, 0x0e, 0x23, 0x75, 0xa7, 0xed, 0xd4, + 0x79, 0x7a, 0x99, 0x5c, 0x89, 0x9e, 0x51, 0x69, 0x61, 0x59, 0xd4, 0xe0, 0x12, 0x2a, 0x19, 0x85, + 0x6f, 0x44, 0x16, 0xf7, 0x94, 0x4a, 0xa9, 0x26, 0xe7, 0x76, 0x61, 0xdc, 0x20, 0xf6, 0x50, 0xc5, + 0x19, 0xdf, 0xce, 0xaf, 0x58, 0x15, 0x35, 0xb2, 0x05, 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0xf2, + 0x71, 0xf9, 0xfe, 0x5e, 0x97, 0x28, 0xbb, 0x7d, 0x34, 0x57, 0xda, 0x04, 0x19, 0x9c, 0xa6, 0x6c, + 0xff, 0x84, 0x05, 0x8f, 0xe8, 0x88, 0x9a, 0xb7, 0x4e, 0x2f, 0x1d, 0x41, 0x19, 0x46, 0xfc, 0x36, + 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x49, 0x0e, 0xfa, 0x4d, 0x51, 0x7e, 0x28, 0x82, 0xb3, + 0x4b, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xf4, 0xf5, 0xc9, 0x06, 0x23, 0x14, 0x7e, 0x59, 0xec, 0x0c, + 0x60, 0xea, 0xf2, 0x10, 0x0b, 0x88, 0xfd, 0x35, 0x8b, 0x2f, 0x2c, 0xbd, 0xeb, 0xe8, 0x2d, 0x98, + 0x6a, 0x39, 0x51, 0x7d, 0x67, 0xe5, 0x5e, 0x3b, 0xe0, 0x1a, 0x17, 0x39, 0x4e, 0x4f, 0xf7, 0x1a, + 0x27, 0xed, 0x23, 0x63, 0xc3, 0xca, 0xb5, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x36, 0x61, 0x94, 0x95, + 0x31, 0x97, 0xc3, 0xb0, 0x1b, 0x6b, 0x90, 0xd7, 0x9a, 0xb2, 0x38, 0x58, 0x8b, 0xe9, 0x60, 0x9d, + 0xa8, 0xfd, 0xe5, 0x22, 0xdf, 0xed, 0x8c, 0x95, 0x7f, 0x0a, 0x86, 0xdb, 0x7e, 0x63, 0xb9, 0x52, + 0xc6, 0x62, 0x16, 0xd4, 0x35, 0x52, 0xe5, 0xc5, 0x58, 0xc2, 0xd1, 0x25, 0x18, 0x11, 0x3f, 0xa5, + 0x86, 0x8c, 0x9d, 0xcd, 0x02, 0x2f, 0xc4, 0x0a, 0x8a, 0x9e, 0x03, 0x68, 0x07, 0xfe, 0x9e, 0xdb, + 0x60, 0xa1, 0x30, 0x8a, 0xa6, 0xb1, 0x50, 0x55, 0x41, 0xb0, 0x86, 0x85, 0x5e, 0x81, 0xf1, 0x8e, + 0x17, 0x72, 0x76, 0x44, 0x0b, 0x7c, 0xab, 0xcc, 0x58, 0x6e, 0xe9, 0x40, 0x6c, 0xe2, 0xa2, 0x45, + 0x18, 0x8a, 0x1c, 0x66, 0xfc, 0x32, 0x98, 0x6f, 0x7c, 0xbb, 0x41, 0x31, 0xf4, 0x4c, 0x26, 0xb4, + 0x02, 0x16, 0x15, 0xd1, 0xa7, 0xa4, 0xf7, 0x2f, 0x3f, 0xd8, 0x85, 0xd5, 0x7b, 0x7f, 0x97, 0x80, + 0xe6, 0xfb, 0x2b, 0xac, 0xe9, 0x0d, 0x5a, 0xe8, 0x65, 0x00, 0x72, 0x2f, 0x22, 0x81, 0xe7, 0x34, + 0x95, 0x6d, 0x99, 0xe2, 0x0b, 0xca, 0xfe, 0xba, 0x1f, 0xdd, 0x0a, 0xc9, 0x8a, 0xc2, 0xc0, 0x1a, + 0xb6, 0xfd, 0x1b, 0x25, 0x80, 0x98, 0x6f, 0x47, 0x6f, 0xa7, 0x0e, 0xae, 0x67, 0xba, 0x73, 0xfa, + 0xc7, 0x77, 0x6a, 0xa1, 0xef, 0xb1, 0x60, 0xd4, 0x69, 0x36, 0xfd, 0xba, 0xc3, 0x43, 0x13, 0x17, + 0xba, 0x1f, 0x9c, 0xa2, 0xfd, 0xc5, 0xb8, 0x06, 0xef, 0xc2, 0xf3, 0x72, 0x85, 0x6a, 0x90, 0x9e, + 0xbd, 0xd0, 0x1b, 0x46, 0x1f, 0x92, 0x4f, 0xc5, 0xa2, 0x31, 0x94, 0xea, 0xa9, 0x58, 0x62, 0x77, + 0x84, 0xfe, 0x4a, 0xbc, 0x65, 0xbc, 0x12, 0x07, 0xf2, 0xdd, 0x1b, 0x0d, 0xf6, 0xb5, 0xd7, 0x03, + 0x11, 0x55, 0xf5, 0x50, 0x07, 0x83, 0xf9, 0xbe, 0x84, 0xda, 0x3b, 0xa9, 0x47, 0x98, 0x83, 0x37, + 0x61, 0xb2, 0x61, 0x32, 0x01, 0x62, 0x25, 0x3e, 0x99, 0x47, 0x37, 0xc1, 0x33, 0xc4, 0xd7, 0x7e, + 0x02, 0x80, 0x93, 0x84, 0x51, 0x95, 0x47, 0xbe, 0xa8, 0x78, 0x5b, 0xbe, 0xf0, 0xbc, 0xb0, 0x73, + 0xe7, 0x72, 0x3f, 0x8c, 0x48, 0x8b, 0x62, 0xc6, 0xb7, 
0xfb, 0xba, 0xa8, 0x8b, 0x15, 0x15, 0xf4, + 0x1a, 0x0c, 0x31, 0x67, 0xb2, 0x70, 0x76, 0x24, 0x5f, 0xe2, 0x6c, 0x86, 0x72, 0x8b, 0x37, 0x24, + 0xfb, 0x1b, 0x62, 0x41, 0x01, 0x5d, 0x93, 0xae, 0x9a, 0x61, 0xc5, 0xbb, 0x15, 0x12, 0xe6, 0xaa, + 0x59, 0x5a, 0x7a, 0x7f, 0xec, 0x85, 0xc9, 0xcb, 0x33, 0xf3, 0x9d, 0x19, 0x35, 0x29, 0x17, 0x25, + 0xfe, 0xcb, 0x34, 0x6a, 0xb3, 0x90, 0xdf, 0x3d, 0x33, 0xd5, 0x5a, 0x3c, 0x9c, 0xb7, 0x4d, 0x12, + 0x38, 0x49, 0x93, 0x72, 0xa4, 0x7c, 0xd7, 0x0b, 0xdf, 0x8d, 0x5e, 0x67, 0x07, 0x7f, 0x88, 0xb3, + 0xdb, 0x88, 0x97, 0x60, 0x51, 0xff, 0x44, 0xd9, 0x83, 0x39, 0x0f, 0xa6, 0x92, 0x5b, 0xf4, 0xa1, + 0xb2, 0x23, 0x7f, 0x30, 0x00, 0x13, 0xe6, 0x92, 0x42, 0x97, 0xa1, 0x24, 0x88, 0xa8, 0xd4, 0x07, + 0x6a, 0x97, 0xac, 0x49, 0x00, 0x8e, 0x71, 0x58, 0xc6, 0x0b, 0x56, 0x5d, 0x33, 0xd6, 0x8d, 0x33, + 0x5e, 0x28, 0x08, 0xd6, 0xb0, 0xe8, 0xc3, 0x6a, 0xd3, 0xf7, 0x23, 0x75, 0x21, 0xa9, 0x75, 0xb7, + 0xc4, 0x4a, 0xb1, 0x80, 0xd2, 0x8b, 0x68, 0x97, 0x04, 0x1e, 0x69, 0x9a, 0x41, 0x92, 0xd5, 0x45, + 0x74, 0x5d, 0x07, 0x62, 0x13, 0x97, 0x5e, 0xa7, 0x7e, 0xc8, 0x16, 0xb2, 0x78, 0xbe, 0xc5, 0xc6, + 0xcf, 0x35, 0xee, 0x2d, 0x2e, 0xe1, 0xe8, 0x93, 0xf0, 0x88, 0x0a, 0x04, 0x85, 0xb9, 0x36, 0x43, + 0xb6, 0x38, 0x64, 0x48, 0x5b, 0x1e, 0x59, 0xce, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xab, 0x30, 0x21, + 0x58, 0x7c, 0x49, 0x71, 0xd8, 0x34, 0xb0, 0xb9, 0x6e, 0x40, 0x71, 0x02, 0x5b, 0x86, 0x79, 0x66, + 0x5c, 0xb6, 0xa4, 0x30, 0x92, 0x0e, 0xf3, 0xac, 0xc3, 0x71, 0xaa, 0x06, 0x5a, 0x84, 0x49, 0xce, + 0x83, 0xb9, 0xde, 0x36, 0x9f, 0x13, 0xe1, 0x5a, 0xa5, 0xb6, 0xd4, 0x4d, 0x13, 0x8c, 0x93, 0xf8, + 0xe8, 0x25, 0x18, 0x73, 0x82, 0xfa, 0x8e, 0x1b, 0x91, 0x7a, 0xd4, 0x09, 0xb8, 0xcf, 0x95, 0x66, + 0xa1, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x0d, 0x33, 0x19, 0x61, 0x24, 0xe8, 0xc2, 0x71, + 0xda, 0xae, 0xfc, 0xa6, 0x84, 0x19, 0xf3, 0x62, 0xb5, 0x22, 0xbf, 0x46, 0xc3, 0xa2, 0xab, 0x93, + 0x85, 0x9b, 0xd0, 0xb2, 0x26, 0xaa, 0xd5, 0xb9, 0x2a, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x57, 0x01, + 0x26, 0x33, 0x74, 0x2b, 0x2c, 0x73, 0x5f, 0xe2, 0x91, 0x12, 0x27, 0xea, 0x33, 0xa3, 0x86, 0x17, + 0x8e, 0x10, 0x35, 0xbc, 0xd8, 0x2b, 0x6a, 0xf8, 0xc0, 0x3b, 0x89, 0x1a, 0x6e, 0x8e, 0xd8, 0x60, + 0x5f, 0x23, 0x96, 0x11, 0x69, 0x7c, 0xe8, 0x88, 0x91, 0xc6, 0x8d, 0x41, 0x1f, 0xee, 0x63, 0xd0, + 0x7f, 0xb8, 0x00, 0x53, 0x49, 0x4b, 0xca, 0x13, 0x90, 0xdb, 0xbe, 0x66, 0xc8, 0x6d, 0x2f, 0xf5, + 0xe3, 0x38, 0x9b, 0x2b, 0xc3, 0xc5, 0x09, 0x19, 0xee, 0x07, 0xfb, 0xa2, 0xd6, 0x5d, 0x9e, 0xfb, + 0x77, 0x0a, 0x70, 0x3a, 0xd3, 0x73, 0xf7, 0x04, 0xc6, 0xe6, 0xa6, 0x31, 0x36, 0xcf, 0xf6, 0xed, + 0x54, 0x9c, 0x3b, 0x40, 0x77, 0x12, 0x03, 0x74, 0xb9, 0x7f, 0x92, 0xdd, 0x47, 0xe9, 0xab, 0x45, + 0x38, 0x9f, 0x59, 0x2f, 0x16, 0x7b, 0xae, 0x1a, 0x62, 0xcf, 0xe7, 0x12, 0x62, 0x4f, 0xbb, 0x7b, + 0xed, 0xe3, 0x91, 0x83, 0x0a, 0x77, 0x59, 0x16, 0x13, 0xe1, 0x01, 0x65, 0xa0, 0x86, 0xbb, 0xac, + 0x22, 0x84, 0x4d, 0xba, 0xdf, 0x4c, 0xb2, 0xcf, 0x7f, 0x67, 0xc1, 0xd9, 0xcc, 0xb9, 0x39, 0x01, + 0x59, 0xd7, 0xba, 0x29, 0xeb, 0x7a, 0xaa, 0xef, 0xd5, 0x9a, 0x23, 0xfc, 0xfa, 0xb5, 0x81, 0x9c, + 0x6f, 0x61, 0x2f, 0xf9, 0x9b, 0x30, 0xea, 0xd4, 0xeb, 0x24, 0x0c, 0xd7, 0xfc, 0x86, 0x0a, 0x8c, + 0xfc, 0x2c, 0x7b, 0x67, 0xc5, 0xc5, 0x87, 0x07, 0xf3, 0x73, 0x49, 0x12, 0x31, 0x18, 0xeb, 0x14, + 0xd0, 0xa7, 0x61, 0x24, 0x14, 0xf7, 0xa6, 0x98, 0xfb, 0xe7, 0xfb, 0x1c, 0x1c, 0x67, 0x93, 0x34, + 0xcd, 0xc8, 0x4d, 0x4a, 0x52, 0xa1, 0x48, 0x9a, 0x51, 0x5e, 0x0a, 0xc7, 0x1a, 0xe5, 0xe5, 0x39, + 0x80, 0x3d, 0xf5, 0x18, 0x48, 0xca, 0x1f, 0xb4, 0x67, 0x82, 0x86, 0x85, 0x3e, 
0x0e, 0x53, 0x21, + 0x0f, 0x6d, 0xb8, 0xdc, 0x74, 0x42, 0xe6, 0x2c, 0x23, 0x56, 0x21, 0x8b, 0x0e, 0x55, 0x4b, 0xc0, + 0x70, 0x0a, 0x1b, 0xad, 0xca, 0x56, 0x59, 0x1c, 0x46, 0xbe, 0x30, 0x2f, 0xc6, 0x2d, 0x8a, 0xbc, + 0xc1, 0xa7, 0x92, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0x3e, 0x0d, 0x40, 0x97, 0x8f, 0x90, 0x43, + 0x0c, 0xe7, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0x64, 0xda, 0xf6, 0x32, 0x0f, 0xd7, 0xb2, 0x22, 0x82, + 0x35, 0x82, 0xf6, 0x0f, 0x0f, 0xc0, 0xa3, 0x5d, 0xce, 0x48, 0xb4, 0x68, 0xea, 0x61, 0x9f, 0x4e, + 0x3e, 0xae, 0xe7, 0x32, 0x2b, 0x1b, 0xaf, 0xed, 0xc4, 0x52, 0x2c, 0xbc, 0xe3, 0xa5, 0xf8, 0x03, + 0x96, 0x26, 0xf6, 0xe0, 0x16, 0x9f, 0x1f, 0x3b, 0xe2, 0xd9, 0x7f, 0x8c, 0x72, 0x90, 0xad, 0x0c, + 0x61, 0xc2, 0x73, 0x7d, 0x77, 0xa7, 0x6f, 0xe9, 0xc2, 0xc9, 0x4a, 0x89, 0x7f, 0xcb, 0x82, 0x73, + 0x5d, 0x43, 0x7c, 0x7c, 0x03, 0x32, 0x0c, 0xf6, 0xe7, 0x2d, 0x78, 0x3c, 0xb3, 0x86, 0x61, 0x66, + 0x74, 0x19, 0x4a, 0x75, 0x5a, 0xa8, 0x79, 0x69, 0xc6, 0xee, 0xeb, 0x12, 0x80, 0x63, 0x9c, 0x23, + 0x86, 0x2f, 0xf9, 0x15, 0x0b, 0x52, 0x9b, 0xfe, 0x04, 0x6e, 0x9f, 0x8a, 0x79, 0xfb, 0xbc, 0xbf, + 0x9f, 0xd1, 0xcc, 0xb9, 0x78, 0xfe, 0x78, 0x12, 0xce, 0xe4, 0x78, 0x29, 0xed, 0xc1, 0xf4, 0x76, + 0x9d, 0x98, 0xfe, 0xaf, 0xdd, 0xa2, 0xc8, 0x74, 0x75, 0x96, 0x65, 0x99, 0x4d, 0xa7, 0x53, 0x28, + 0x38, 0xdd, 0x04, 0xfa, 0xbc, 0x05, 0xa7, 0x9c, 0xbb, 0xe1, 0x0a, 0xe5, 0x22, 0xdc, 0xfa, 0x52, + 0xd3, 0xaf, 0xef, 0xd2, 0x23, 0x5a, 0x6e, 0x84, 0x17, 0x32, 0x25, 0x3b, 0x77, 0x6a, 0x29, 0x7c, + 0xa3, 0x79, 0x96, 0xea, 0x35, 0x0b, 0x0b, 0x67, 0xb6, 0x85, 0xb0, 0x88, 0xff, 0x4f, 0xdf, 0x28, + 0x5d, 0x3c, 0xb4, 0xb3, 0xdc, 0xc9, 0xf8, 0xb5, 0x28, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x85, 0xd2, + 0xb6, 0xf4, 0xf1, 0xcc, 0xb8, 0x76, 0xe3, 0x81, 0xec, 0xee, 0xf9, 0xca, 0xd5, 0xb3, 0x0a, 0x09, + 0xc7, 0x44, 0xd1, 0xab, 0x50, 0xf4, 0xb6, 0xc2, 0x6e, 0xd9, 0x52, 0x13, 0x76, 0x78, 0x3c, 0x0e, + 0xc2, 0xfa, 0x6a, 0x0d, 0xd3, 0x8a, 0xe8, 0x1a, 0x14, 0x83, 0xcd, 0x86, 0x10, 0x4b, 0x66, 0x6e, + 0x52, 0xbc, 0x54, 0xce, 0xe9, 0x15, 0xa3, 0x84, 0x97, 0xca, 0x98, 0x92, 0x40, 0x55, 0x18, 0x64, + 0xae, 0x3d, 0xe2, 0x92, 0xcb, 0x64, 0xe7, 0xbb, 0xb8, 0xc8, 0xf1, 0x60, 0x09, 0x0c, 0x01, 0x73, + 0x42, 0x68, 0x03, 0x86, 0xea, 0x2c, 0xb3, 0xa6, 0x88, 0x1b, 0xf7, 0xa1, 0x4c, 0x01, 0x64, 0x97, + 0x94, 0xa3, 0x42, 0x1e, 0xc7, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xf6, 0xce, 0x56, 0x28, 0x32, + 0x41, 0x67, 0x53, 0xed, 0x92, 0x49, 0x57, 0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0x2f, 0x43, 0x61, + 0xab, 0x2e, 0xdc, 0x76, 0x32, 0x25, 0x91, 0x66, 0x28, 0x8b, 0xa5, 0xa1, 0xfb, 0x07, 0xf3, 0x85, + 0xd5, 0x65, 0x5c, 0xd8, 0xaa, 0xa3, 0x75, 0x18, 0xde, 0xe2, 0xce, 0xef, 0x42, 0xd8, 0xf8, 0x64, + 0xb6, 0x5f, 0x7e, 0xca, 0x3f, 0x9e, 0x7b, 0xac, 0x08, 0x00, 0x96, 0x44, 0x58, 0x38, 0x7d, 0xe5, + 0xc4, 0x2f, 0x42, 0xac, 0x2d, 0x1c, 0x2d, 0xf0, 0x02, 0x67, 0x3a, 0xe2, 0x50, 0x00, 0x58, 0xa3, + 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xf1, 0x8b, 0x60, 0x33, 0x99, 0xab, 0x5a, 0xe5, 0xec, 0xef, 0xb6, + 0xaa, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0x5d, 0x18, 0xdf, 0x0b, 0xdb, 0x3b, 0x44, 0x6e, 0x69, 0x16, + 0x7b, 0x26, 0xe7, 0x5e, 0xbe, 0x2d, 0x10, 0xdd, 0x20, 0xea, 0x38, 0xcd, 0xd4, 0x29, 0xc4, 0x74, + 0xfa, 0xb7, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x87, 0xff, 0xad, 0x8e, 0xbf, 0xb9, 0x1f, 0x11, 0x11, + 0x19, 0x2d, 0x73, 0xf8, 0x5f, 0xe7, 0x28, 0xe9, 0xe1, 0x17, 0x00, 0x2c, 0x89, 0xa0, 0xdb, 0x62, + 0x78, 0xd8, 0xe9, 0x39, 0x95, 0x1f, 0xbe, 0x74, 0x51, 0x22, 0xe5, 0x0c, 0x0a, 0x3b, 0x2d, 0x63, + 0x52, 0xec, 0x94, 0x6c, 0xef, 0xf8, 0x91, 0xef, 0x25, 0x4e, 0xe8, 0xe9, 0xfc, 0x53, 0xb2, 0x9a, + 0x81, 
0x9f, 0x3e, 0x25, 0xb3, 0xb0, 0x70, 0x66, 0x5b, 0xa8, 0x01, 0x13, 0x6d, 0x3f, 0x88, 0xee, + 0xfa, 0x81, 0x5c, 0x5f, 0xa8, 0x8b, 0xb0, 0xc4, 0xc0, 0x14, 0x2d, 0xb2, 0xa0, 0x83, 0x26, 0x04, + 0x27, 0x68, 0xa2, 0x4f, 0xc0, 0x70, 0x58, 0x77, 0x9a, 0xa4, 0x72, 0x73, 0x76, 0x26, 0xff, 0xfa, + 0xa9, 0x71, 0x94, 0x9c, 0xd5, 0xc5, 0x63, 0xef, 0x73, 0x14, 0x2c, 0xc9, 0xa1, 0x55, 0x18, 0x64, + 0xe9, 0xd2, 0x58, 0x18, 0xbf, 0x9c, 0x28, 0xac, 0x29, 0xab, 0x68, 0x7e, 0x36, 0xb1, 0x62, 0xcc, + 0xab, 0xd3, 0x3d, 0x20, 0xde, 0x0c, 0x7e, 0x38, 0x7b, 0x3a, 0x7f, 0x0f, 0x88, 0xa7, 0xc6, 0xcd, + 0x5a, 0xb7, 0x3d, 0xa0, 0x90, 0x70, 0x4c, 0x94, 0x9e, 0xcc, 0xf4, 0x34, 0x3d, 0xd3, 0xc5, 0x9c, + 0x27, 0xf7, 0x2c, 0x65, 0x27, 0x33, 0x3d, 0x49, 0x29, 0x09, 0xfb, 0xf7, 0x86, 0xd3, 0x3c, 0x0b, + 0x7b, 0x65, 0x7e, 0x97, 0x95, 0x52, 0x40, 0x7e, 0xb8, 0x5f, 0xa1, 0xd7, 0x31, 0xb2, 0xe0, 0x9f, + 0xb7, 0xe0, 0x4c, 0x3b, 0xf3, 0x43, 0x04, 0x03, 0xd0, 0x9f, 0xec, 0x8c, 0x7f, 0xba, 0x0a, 0xf9, + 0x98, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x9f, 0x39, 0xc5, 0x77, 0xfc, 0xcc, 0x59, 0x83, 0x11, 0xc6, + 0x64, 0xf6, 0xc8, 0x34, 0x9d, 0x7c, 0xed, 0x31, 0x56, 0x62, 0x59, 0x54, 0xc4, 0x8a, 0x04, 0xfa, + 0x41, 0x0b, 0xce, 0x25, 0xbb, 0x8e, 0x09, 0x03, 0x8b, 0x38, 0x91, 0xfc, 0x81, 0xbb, 0x2a, 0xbe, + 0x3f, 0xc5, 0xff, 0x1b, 0xc8, 0x87, 0xbd, 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x39, 0xe3, 0x85, 0x3d, + 0x64, 0x6a, 0x15, 0xfa, 0x78, 0x65, 0xbf, 0x00, 0x63, 0x2d, 0xbf, 0xe3, 0x45, 0xc2, 0xfa, 0x47, + 0x58, 0x22, 0x30, 0x0d, 0xfc, 0x9a, 0x56, 0x8e, 0x0d, 0xac, 0xc4, 0xdb, 0x7c, 0xe4, 0x81, 0xdf, + 0xe6, 0x6f, 0xc0, 0x98, 0xa7, 0x99, 0xab, 0x0a, 0x7e, 0xe0, 0x62, 0x7e, 0x8c, 0x57, 0xdd, 0xb8, + 0x95, 0xf7, 0x52, 0x2f, 0xc1, 0x06, 0xb5, 0x93, 0x7d, 0xf0, 0x7d, 0xc9, 0xca, 0x60, 0xea, 0xb9, + 0x08, 0xe0, 0xa3, 0xa6, 0x08, 0xe0, 0x62, 0x52, 0x04, 0x90, 0x92, 0x28, 0x1b, 0xaf, 0xff, 0xfe, + 0x53, 0xd8, 0xf4, 0x1b, 0x08, 0xd1, 0x6e, 0xc2, 0x85, 0x5e, 0xd7, 0x12, 0x33, 0x03, 0x6b, 0x28, + 0xfd, 0x61, 0x6c, 0x06, 0xd6, 0xa8, 0x94, 0x31, 0x83, 0xf4, 0x1b, 0x62, 0xc7, 0xfe, 0x1f, 0x16, + 0x14, 0xab, 0x7e, 0xe3, 0x04, 0x1e, 0xbc, 0x1f, 0x33, 0x1e, 0xbc, 0x8f, 0x66, 0x5f, 0x88, 0x8d, + 0x5c, 0x79, 0xf8, 0x4a, 0x42, 0x1e, 0x7e, 0x2e, 0x8f, 0x40, 0x77, 0xe9, 0xf7, 0x4f, 0x16, 0x61, + 0xb4, 0xea, 0x37, 0x94, 0x0d, 0xf6, 0xaf, 0x3d, 0x88, 0x0d, 0x76, 0x6e, 0x22, 0x06, 0x8d, 0x32, + 0xb3, 0x1e, 0x93, 0xee, 0xa7, 0xdf, 0x60, 0xa6, 0xd8, 0x77, 0x88, 0xbb, 0xbd, 0x13, 0x91, 0x46, + 0xf2, 0x73, 0x4e, 0xce, 0x14, 0xfb, 0xbf, 0x5b, 0x30, 0x99, 0x68, 0x1d, 0x35, 0x61, 0xbc, 0xa9, + 0x4b, 0x5b, 0xc5, 0x3a, 0x7d, 0x20, 0x41, 0xad, 0x30, 0x65, 0xd5, 0x8a, 0xb0, 0x49, 0x1c, 0x2d, + 0x00, 0x28, 0xf5, 0xa3, 0x14, 0xeb, 0x31, 0xae, 0x5f, 0xe9, 0x27, 0x43, 0xac, 0x61, 0xa0, 0x17, + 0x61, 0x34, 0xf2, 0xdb, 0x7e, 0xd3, 0xdf, 0xde, 0xbf, 0x4e, 0x64, 0x50, 0x27, 0x65, 0xa0, 0xb6, + 0x11, 0x83, 0xb0, 0x8e, 0x67, 0xff, 0x74, 0x91, 0x7f, 0xa8, 0x17, 0xb9, 0xef, 0xad, 0xc9, 0x77, + 0xf7, 0x9a, 0xfc, 0xaa, 0x05, 0x53, 0xb4, 0x75, 0x66, 0x03, 0x23, 0x2f, 0x5b, 0x15, 0xdb, 0xd9, + 0xea, 0x12, 0xdb, 0xf9, 0x22, 0x3d, 0xbb, 0x1a, 0x7e, 0x27, 0x12, 0x12, 0x34, 0xed, 0x70, 0xa2, + 0xa5, 0x58, 0x40, 0x05, 0x1e, 0x09, 0x02, 0xe1, 0xb7, 0xa7, 0xe3, 0x91, 0x20, 0xc0, 0x02, 0x2a, + 0x43, 0x3f, 0x0f, 0x64, 0x87, 0x7e, 0xe6, 0x21, 0x2a, 0x85, 0xb5, 0x84, 0x60, 0x7b, 0xb4, 0x10, + 0x95, 0xd2, 0x8c, 0x22, 0xc6, 0xb1, 0x7f, 0xb6, 0x08, 0x63, 0x55, 0xbf, 0x11, 0x2b, 0x00, 0x5f, + 0x30, 0x14, 0x80, 0x17, 0x12, 0x0a, 0xc0, 0x29, 0x1d, 0xf7, 0x3d, 0x75, 0xdf, 0xd7, 0x4b, 0xdd, + 0xf7, 0xcf, 0x2d, 0x36, 0x6b, 
0xe5, 0xf5, 0x1a, 0x37, 0xa9, 0x42, 0x57, 0x60, 0x94, 0x1d, 0x48, + 0xcc, 0x51, 0x54, 0x6a, 0xc5, 0x58, 0x4a, 0xa3, 0xf5, 0xb8, 0x18, 0xeb, 0x38, 0xe8, 0x12, 0x8c, + 0x84, 0xc4, 0x09, 0xea, 0x3b, 0xea, 0x8c, 0x13, 0x2a, 0x2c, 0x5e, 0x86, 0x15, 0x14, 0xbd, 0x1e, + 0x47, 0x47, 0x2c, 0xe6, 0x3b, 0x9e, 0xe9, 0xfd, 0xe1, 0x5b, 0x24, 0x3f, 0x24, 0xa2, 0x7d, 0x07, + 0x50, 0x1a, 0xbf, 0x8f, 0xb0, 0x60, 0xf3, 0x66, 0x58, 0xb0, 0x52, 0x2a, 0x24, 0xd8, 0x9f, 0x5b, + 0x30, 0x51, 0xf5, 0x1b, 0x74, 0xeb, 0x7e, 0x33, 0xed, 0x53, 0x3d, 0x34, 0xec, 0x50, 0x97, 0xd0, + 0xb0, 0x4f, 0xc0, 0x60, 0xd5, 0x6f, 0x54, 0xaa, 0xdd, 0xbc, 0xbe, 0xed, 0xbf, 0x6b, 0xc1, 0x70, + 0xd5, 0x6f, 0x9c, 0x80, 0x70, 0xfe, 0xa3, 0xa6, 0x70, 0xfe, 0x91, 0x9c, 0x75, 0x93, 0x23, 0x8f, + 0xff, 0xdb, 0x03, 0x30, 0x4e, 0xfb, 0xe9, 0x6f, 0xcb, 0xa9, 0x34, 0x86, 0xcd, 0xea, 0x63, 0xd8, + 0x28, 0x2f, 0xec, 0x37, 0x9b, 0xfe, 0xdd, 0xe4, 0xb4, 0xae, 0xb2, 0x52, 0x2c, 0xa0, 0xe8, 0x19, + 0x18, 0x69, 0x07, 0x64, 0xcf, 0xf5, 0x05, 0x93, 0xa9, 0xa9, 0x3a, 0xaa, 0xa2, 0x1c, 0x2b, 0x0c, + 0xfa, 0x38, 0x0b, 0x5d, 0xaf, 0x4e, 0x6a, 0xa4, 0xee, 0x7b, 0x0d, 0x2e, 0xbf, 0x2e, 0x8a, 0xf4, + 0x0e, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x1d, 0x28, 0xb1, 0xff, 0xec, 0xd8, 0x39, 0x7a, 0xa2, 0x50, + 0x91, 0x38, 0x4e, 0x10, 0xc0, 0x31, 0x2d, 0xf4, 0x1c, 0x40, 0x24, 0x63, 0x80, 0x87, 0x22, 0x04, + 0x94, 0x62, 0xc8, 0x55, 0x74, 0xf0, 0x10, 0x6b, 0x58, 0xe8, 0x69, 0x28, 0x45, 0x8e, 0xdb, 0xbc, + 0xe1, 0x7a, 0x24, 0x64, 0x72, 0xe9, 0xa2, 0xcc, 0xdf, 0x26, 0x0a, 0x71, 0x0c, 0xa7, 0x0c, 0x11, + 0x8b, 0x8f, 0xc0, 0xd3, 0x0c, 0x8f, 0x30, 0x6c, 0xc6, 0x10, 0xdd, 0x50, 0xa5, 0x58, 0xc3, 0x40, + 0x3b, 0xf0, 0x98, 0xeb, 0xb1, 0x54, 0x08, 0xa4, 0xb6, 0xeb, 0xb6, 0x37, 0x6e, 0xd4, 0x6e, 0x93, + 0xc0, 0xdd, 0xda, 0x5f, 0x72, 0xea, 0xbb, 0xc4, 0x93, 0x29, 0x20, 0xdf, 0x2f, 0xba, 0xf8, 0x58, + 0xa5, 0x0b, 0x2e, 0xee, 0x4a, 0xc9, 0x7e, 0x09, 0x4e, 0x57, 0xfd, 0x46, 0xd5, 0x0f, 0xa2, 0x55, + 0x3f, 0xb8, 0xeb, 0x04, 0x0d, 0xb9, 0x52, 0xe6, 0x65, 0xac, 0x02, 0x7a, 0x14, 0x0e, 0xf2, 0x83, + 0xc2, 0x88, 0x43, 0xf0, 0x3c, 0x63, 0xbe, 0x8e, 0xe8, 0x61, 0x53, 0x67, 0x6c, 0x80, 0xca, 0x0b, + 0x72, 0xd5, 0x89, 0x08, 0xba, 0xc9, 0xf2, 0x1d, 0xc7, 0x37, 0xa2, 0xa8, 0xfe, 0x94, 0x96, 0xef, + 0x38, 0x06, 0x66, 0x5e, 0xa1, 0x66, 0x7d, 0xfb, 0x7f, 0x0e, 0xb2, 0xc3, 0x31, 0x91, 0x5b, 0x02, + 0x7d, 0x06, 0x26, 0x42, 0x72, 0xc3, 0xf5, 0x3a, 0xf7, 0xa4, 0x4c, 0xa0, 0x8b, 0x8f, 0x54, 0x6d, + 0x45, 0xc7, 0xe4, 0x92, 0x45, 0xb3, 0x0c, 0x27, 0xa8, 0xa1, 0x16, 0x4c, 0xdc, 0x75, 0xbd, 0x86, + 0x7f, 0x37, 0x94, 0xf4, 0x47, 0xf2, 0x05, 0x8c, 0x77, 0x38, 0x66, 0xa2, 0x8f, 0x46, 0x73, 0x77, + 0x0c, 0x62, 0x38, 0x41, 0x9c, 0x2e, 0xc0, 0xa0, 0xe3, 0x2d, 0x86, 0xb7, 0x42, 0x12, 0x88, 0xcc, + 0xd5, 0x6c, 0x01, 0x62, 0x59, 0x88, 0x63, 0x38, 0x5d, 0x80, 0xec, 0xcf, 0xd5, 0xc0, 0xef, 0xf0, + 0x48, 0xfd, 0x62, 0x01, 0x62, 0x55, 0x8a, 0x35, 0x0c, 0xba, 0x41, 0xd9, 0xbf, 0x75, 0xdf, 0xc3, + 0xbe, 0x1f, 0xc9, 0x2d, 0xcd, 0x72, 0xa5, 0x6a, 0xe5, 0xd8, 0xc0, 0x42, 0xab, 0x80, 0xc2, 0x4e, + 0xbb, 0xdd, 0x64, 0xc6, 0x17, 0x4e, 0x93, 0x91, 0xe2, 0x8a, 0xef, 0x22, 0x0f, 0x60, 0x5a, 0x4b, + 0x41, 0x71, 0x46, 0x0d, 0x7a, 0x56, 0x6f, 0x89, 0xae, 0x0e, 0xb2, 0xae, 0x72, 0x65, 0x44, 0x8d, + 0xf7, 0x53, 0xc2, 0xd0, 0x0a, 0x0c, 0x87, 0xfb, 0x61, 0x3d, 0x12, 0x91, 0xd8, 0x72, 0xd2, 0x07, + 0xd5, 0x18, 0x8a, 0x96, 0xbd, 0x8e, 0x57, 0xc1, 0xb2, 0x2e, 0xaa, 0xc3, 0x8c, 0xa0, 0xb8, 0xbc, + 0xe3, 0x78, 0x2a, 0x19, 0x0b, 0xb7, 0x41, 0xbd, 0x72, 0xff, 0x60, 0x7e, 0x46, 0xb4, 0xac, 0x83, + 0x0f, 0x0f, 0xe6, 0xcf, 0x54, 0xfd, 0x46, 0x06, 0x04, 
0x67, 0x51, 0xe3, 0x8b, 0xaf, 0x5e, 0xf7, + 0x5b, 0xed, 0x6a, 0xe0, 0x6f, 0xb9, 0x4d, 0xd2, 0x4d, 0xa1, 0x53, 0x33, 0x30, 0xc5, 0xe2, 0x33, + 0xca, 0x70, 0x82, 0x9a, 0xfd, 0xed, 0x8c, 0x9f, 0x61, 0xc9, 0x9a, 0xa3, 0x4e, 0x40, 0x50, 0x0b, + 0xc6, 0xdb, 0x6c, 0x9b, 0x88, 0xf8, 0xf9, 0x62, 0xad, 0xbf, 0xd0, 0xa7, 0x60, 0xe2, 0x2e, 0xbd, + 0x06, 0x94, 0xe0, 0x90, 0xbd, 0xf8, 0xaa, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0x7f, 0xec, 0x11, 0x76, + 0x23, 0xd6, 0xb8, 0xb4, 0x61, 0x58, 0x98, 0xbc, 0x8b, 0xa7, 0xd5, 0x5c, 0xbe, 0xd8, 0x2b, 0x9e, + 0x16, 0x61, 0x36, 0x8f, 0x65, 0x5d, 0xf4, 0x69, 0x98, 0xa0, 0x2f, 0x15, 0x2d, 0x0b, 0xca, 0xa9, + 0xfc, 0xd0, 0x04, 0x71, 0xf2, 0x13, 0x2d, 0xb7, 0x86, 0x5e, 0x19, 0x27, 0x88, 0xa1, 0xd7, 0x99, + 0x71, 0x86, 0x99, 0x60, 0xa5, 0x07, 0x69, 0xdd, 0x0e, 0x43, 0x92, 0xd5, 0x88, 0xe4, 0x25, 0x6f, + 0xb1, 0x1f, 0x6e, 0xf2, 0x16, 0x74, 0x03, 0xc6, 0x45, 0xc6, 0x62, 0xb1, 0x72, 0x8b, 0x86, 0x34, + 0x6e, 0x1c, 0xeb, 0xc0, 0xc3, 0x64, 0x01, 0x36, 0x2b, 0xa3, 0x6d, 0x38, 0xa7, 0x65, 0x10, 0xba, + 0x1a, 0x38, 0x4c, 0xa5, 0xee, 0xb2, 0xe3, 0x54, 0xbb, 0xab, 0x1f, 0xbf, 0x7f, 0x30, 0x7f, 0x6e, + 0xa3, 0x1b, 0x22, 0xee, 0x4e, 0x07, 0xdd, 0x84, 0xd3, 0xdc, 0xb1, 0xb6, 0x4c, 0x9c, 0x46, 0xd3, + 0xf5, 0x14, 0x33, 0xc0, 0xb7, 0xfc, 0xd9, 0xfb, 0x07, 0xf3, 0xa7, 0x17, 0xb3, 0x10, 0x70, 0x76, + 0x3d, 0xf4, 0x51, 0x28, 0x35, 0xbc, 0x50, 0x8c, 0xc1, 0x90, 0x91, 0xa4, 0xa9, 0x54, 0x5e, 0xaf, + 0xa9, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x02, 0xda, 0xe6, 0x12, 0x5b, 0x25, 0x20, 0x19, 0x4e, 0x05, + 0x16, 0x4a, 0x8a, 0xda, 0x0c, 0xd7, 0x3a, 0xae, 0xaa, 0x50, 0x16, 0xe7, 0x86, 0xd7, 0x9d, 0x41, + 0x18, 0xbd, 0x06, 0x88, 0xbe, 0x20, 0xdc, 0x3a, 0x59, 0xac, 0xb3, 0xe4, 0x0c, 0x4c, 0xc0, 0x3d, + 0x62, 0x3a, 0x7b, 0xd5, 0x52, 0x18, 0x38, 0xa3, 0x16, 0xba, 0x46, 0x4f, 0x15, 0xbd, 0x54, 0x9c, + 0x5a, 0x2a, 0xa5, 0x5e, 0x99, 0xb4, 0x03, 0x52, 0x77, 0x22, 0xd2, 0x30, 0x29, 0xe2, 0x44, 0x3d, + 0xd4, 0x80, 0xc7, 0x9c, 0x4e, 0xe4, 0x33, 0x61, 0xb8, 0x89, 0xba, 0xe1, 0xef, 0x12, 0x8f, 0xe9, + 0xa1, 0x46, 0x96, 0x2e, 0x50, 0x6e, 0x63, 0xb1, 0x0b, 0x1e, 0xee, 0x4a, 0x85, 0x72, 0x89, 0x2a, + 0x87, 0x2e, 0x98, 0xe1, 0x92, 0x32, 0xf2, 0xe8, 0xbe, 0x08, 0xa3, 0x3b, 0x7e, 0x18, 0xad, 0x93, + 0xe8, 0xae, 0x1f, 0xec, 0x8a, 0xa8, 0x97, 0x71, 0xa4, 0xe4, 0x18, 0x84, 0x75, 0x3c, 0xfa, 0x0c, + 0x64, 0x56, 0x12, 0x95, 0x32, 0x53, 0x50, 0x8f, 0xc4, 0x67, 0xcc, 0x35, 0x5e, 0x8c, 0x25, 0x5c, + 0xa2, 0x56, 0xaa, 0xcb, 0x4c, 0xd9, 0x9c, 0x40, 0xad, 0x54, 0x97, 0xb1, 0x84, 0xd3, 0xe5, 0x1a, + 0xee, 0x38, 0x01, 0xa9, 0x06, 0x7e, 0x9d, 0x84, 0x5a, 0x7c, 0xee, 0x47, 0x79, 0x4c, 0x4f, 0xba, + 0x5c, 0x6b, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x22, 0xe9, 0xec, 0x59, 0x13, 0xf9, 0x5a, 0x82, 0x34, + 0x3f, 0xd3, 0x67, 0x02, 0x2d, 0x0f, 0xa6, 0x54, 0xde, 0x2e, 0x1e, 0xc5, 0x33, 0x9c, 0x9d, 0x64, + 0x6b, 0xbb, 0xff, 0x10, 0xa0, 0x4a, 0xef, 0x52, 0x49, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xac, + 0xa9, 0x9e, 0x11, 0xb1, 0x2e, 0x43, 0x29, 0xec, 0x6c, 0x36, 0xfc, 0x96, 0xe3, 0x7a, 0x4c, 0xd9, + 0xac, 0xbd, 0x47, 0x6a, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x0a, 0x23, 0x8e, 0x54, 0xaa, 0xa0, 0xfc, + 0x18, 0x28, 0x4a, 0x95, 0xc2, 0xc3, 0x02, 0x48, 0x35, 0x8a, 0xaa, 0x8b, 0x5e, 0x81, 0x71, 0xe1, + 0x18, 0x2a, 0x52, 0x46, 0xce, 0x98, 0xde, 0x3b, 0x35, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0b, 0x46, + 0x23, 0xbf, 0xc9, 0x5c, 0x50, 0x28, 0x9b, 0x77, 0x26, 0x3f, 0x9a, 0xd7, 0x86, 0x42, 0xd3, 0xe5, + 0x99, 0xaa, 0x2a, 0xd6, 0xe9, 0xa0, 0x0d, 0xbe, 0xde, 0x59, 0x9c, 0x6a, 0x12, 0xce, 0x3e, 0x92, + 0x7f, 0x27, 0xa9, 0x70, 0xd6, 0xe6, 0x76, 0x10, 0x35, 0xb1, 0x4e, 0x06, 0x5d, 
0x85, 0xe9, 0x76, + 0xe0, 0xfa, 0x6c, 0x4d, 0x28, 0x7d, 0xda, 0xac, 0x99, 0x24, 0xa7, 0x9a, 0x44, 0xc0, 0xe9, 0x3a, + 0xcc, 0xaf, 0x57, 0x14, 0xce, 0x9e, 0xe5, 0x59, 0xa5, 0xf9, 0xf3, 0x8e, 0x97, 0x61, 0x05, 0x45, + 0x6b, 0xec, 0x24, 0xe6, 0x92, 0x89, 0xd9, 0xb9, 0xfc, 0xb0, 0x2b, 0xba, 0x04, 0x83, 0x33, 0xaf, + 0xea, 0x2f, 0x8e, 0x29, 0xa0, 0x86, 0x96, 0x7e, 0x90, 0xbe, 0x18, 0xc2, 0xd9, 0xc7, 0xba, 0x98, + 0xaa, 0x25, 0x9e, 0x17, 0x31, 0x43, 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0x7d, 0x1c, 0xa6, 0x44, + 0xb0, 0xb8, 0x78, 0x98, 0xce, 0xc5, 0x86, 0xbd, 0x38, 0x01, 0xc3, 0x29, 0x6c, 0x1e, 0xbf, 0xdf, + 0xd9, 0x6c, 0x12, 0x71, 0xf4, 0xdd, 0x70, 0xbd, 0xdd, 0x70, 0xf6, 0x3c, 0x3b, 0x1f, 0x44, 0xfc, + 0xfe, 0x24, 0x14, 0x67, 0xd4, 0x40, 0x1b, 0x30, 0xd5, 0x0e, 0x08, 0x69, 0x31, 0x46, 0x5f, 0xdc, + 0x67, 0xf3, 0xdc, 0xad, 0x9d, 0xf6, 0xa4, 0x9a, 0x80, 0x1d, 0x66, 0x94, 0xe1, 0x14, 0x05, 0x74, + 0x17, 0x46, 0xfc, 0x3d, 0x12, 0xec, 0x10, 0xa7, 0x31, 0x7b, 0xa1, 0x8b, 0xa1, 0xb9, 0xb8, 0xdc, + 0x6e, 0x0a, 0xdc, 0x84, 0x0e, 0x5e, 0x16, 0xf7, 0xd6, 0xc1, 0xcb, 0xc6, 0xd0, 0x0f, 0x59, 0x70, + 0x56, 0x8a, 0xed, 0x6b, 0x6d, 0x3a, 0xea, 0xcb, 0xbe, 0x17, 0x46, 0x01, 0x77, 0xc4, 0x7e, 0x3c, + 0xdf, 0x39, 0x79, 0x23, 0xa7, 0x92, 0x12, 0x8e, 0x9e, 0xcd, 0xc3, 0x08, 0x71, 0x7e, 0x8b, 0x68, + 0x19, 0xa6, 0x43, 0x12, 0xc9, 0xc3, 0x68, 0x31, 0x5c, 0x7d, 0xbd, 0xbc, 0x3e, 0xfb, 0x04, 0xf7, + 0x22, 0xa7, 0x9b, 0xa1, 0x96, 0x04, 0xe2, 0x34, 0xfe, 0xdc, 0xb7, 0xc2, 0x74, 0xea, 0xfa, 0x3f, + 0x4a, 0x5e, 0x92, 0xb9, 0x5d, 0x18, 0x37, 0x86, 0xf8, 0xa1, 0xea, 0x70, 0xff, 0xcd, 0x30, 0x94, + 0x94, 0x7e, 0x0f, 0x5d, 0x36, 0xd5, 0xb6, 0x67, 0x93, 0x6a, 0xdb, 0x11, 0xfa, 0xae, 0xd7, 0x35, + 0xb5, 0x1b, 0x19, 0xb1, 0xb3, 0xf2, 0x36, 0x74, 0xff, 0x4e, 0xd1, 0x9a, 0xb8, 0xb6, 0xd8, 0xb7, + 0xfe, 0x77, 0xa0, 0xab, 0x04, 0xf8, 0x2a, 0x4c, 0x7b, 0x3e, 0xe3, 0x39, 0x49, 0x43, 0x32, 0x14, + 0x8c, 0x6f, 0x28, 0xe9, 0xc1, 0x28, 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x2f, 0xfe, 0xa4, + 0xc8, 0x99, 0xf3, 0x05, 0x58, 0x40, 0xd1, 0x13, 0x30, 0xd8, 0xf6, 0x1b, 0x95, 0xaa, 0xe0, 0x37, + 0xb5, 0x88, 0x8d, 0x8d, 0x4a, 0x15, 0x73, 0x18, 0x5a, 0x84, 0x21, 0xf6, 0x23, 0x9c, 0x1d, 0xcb, + 0x8f, 0x3a, 0xc0, 0x6a, 0x68, 0x59, 0x5f, 0x58, 0x05, 0x2c, 0x2a, 0x32, 0xd1, 0x17, 0x65, 0xd2, + 0x99, 0xe8, 0x6b, 0xf8, 0x01, 0x45, 0x5f, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x7b, 0x70, 0xda, 0x78, + 0x18, 0xf1, 0x25, 0x42, 0x42, 0xe1, 0xf9, 0xfc, 0x44, 0xd7, 0x17, 0x91, 0xd0, 0x17, 0x9f, 0x13, + 0x9d, 0x3e, 0x5d, 0xc9, 0xa2, 0x84, 0xb3, 0x1b, 0x40, 0x4d, 0x98, 0xae, 0xa7, 0x5a, 0x1d, 0xe9, + 0xbf, 0x55, 0x35, 0xa1, 0xe9, 0x16, 0xd3, 0x84, 0xd1, 0x2b, 0x30, 0xf2, 0x96, 0x1f, 0xb2, 0xb3, + 0x5a, 0xf0, 0xc8, 0xd2, 0x6d, 0x76, 0xe4, 0xf5, 0x9b, 0x35, 0x56, 0x7e, 0x78, 0x30, 0x3f, 0x5a, + 0xf5, 0x1b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0xf7, 0x5a, 0x30, 0x97, 0x7e, 0x79, 0xa9, 0x4e, 0x8f, + 0xf7, 0xdf, 0x69, 0x5b, 0x34, 0x3a, 0xb7, 0x92, 0x4b, 0x0e, 0x77, 0x69, 0xca, 0xfe, 0x25, 0xae, + 0xdb, 0x15, 0x1a, 0x20, 0x12, 0x76, 0x9a, 0x27, 0x91, 0xec, 0x72, 0xc5, 0x50, 0x4e, 0x3d, 0xb0, + 0xfd, 0xc0, 0xbf, 0xb4, 0x98, 0xfd, 0xc0, 0x09, 0x3a, 0x0a, 0xbc, 0x0e, 0x23, 0x91, 0x4c, 0x59, + 0xda, 0x25, 0x3f, 0xa7, 0xd6, 0x29, 0x66, 0x43, 0xa1, 0x38, 0x56, 0x95, 0x9d, 0x54, 0x91, 0xb1, + 0xff, 0x09, 0x9f, 0x01, 0x09, 0x39, 0x01, 0x1d, 0x40, 0xd9, 0xd4, 0x01, 0xcc, 0xf7, 0xf8, 0x82, + 0x1c, 0x5d, 0xc0, 0x3f, 0x36, 0xfb, 0xcd, 0x24, 0x35, 0xef, 0x76, 0xc3, 0x15, 0xfb, 0x0b, 0x16, + 0x40, 0x1c, 0x10, 0x97, 0xc9, 0x97, 0xfd, 0x40, 0x66, 0x3a, 0xcc, 0xca, 0xe9, 0xf3, 0x12, 0xe5, + 0x51, 
0xfd, 0xc8, 0xaf, 0xfb, 0x4d, 0xa1, 0xe1, 0x7a, 0x2c, 0x56, 0x43, 0xf0, 0xf2, 0x43, 0xed, + 0x37, 0x56, 0xd8, 0x68, 0x5e, 0x86, 0xdf, 0x2a, 0xc6, 0x8a, 0x31, 0x23, 0xf4, 0xd6, 0x8f, 0x58, + 0x70, 0x2a, 0xcb, 0xea, 0x94, 0xbe, 0x78, 0xb8, 0xcc, 0x4a, 0x19, 0x15, 0xa9, 0xd9, 0xbc, 0x2d, + 0xca, 0xb1, 0xc2, 0xe8, 0x3b, 0x7f, 0xd7, 0xd1, 0x22, 0xd1, 0xde, 0x84, 0xf1, 0x6a, 0x40, 0xb4, + 0xcb, 0xf5, 0x55, 0xee, 0xd2, 0xcd, 0xfb, 0xf3, 0xcc, 0x91, 0xdd, 0xb9, 0xed, 0x2f, 0x17, 0xe0, + 0x14, 0xb7, 0x0a, 0x58, 0xdc, 0xf3, 0xdd, 0x46, 0xd5, 0x6f, 0x88, 0xdc, 0x6b, 0x9f, 0x82, 0xb1, + 0xb6, 0x26, 0x68, 0xec, 0x16, 0x55, 0x51, 0x17, 0x48, 0xc6, 0xa2, 0x11, 0xbd, 0x14, 0x1b, 0xb4, + 0x50, 0x03, 0xc6, 0xc8, 0x9e, 0x5b, 0x57, 0xaa, 0xe5, 0xc2, 0x91, 0x2f, 0x3a, 0xd5, 0xca, 0x8a, + 0x46, 0x07, 0x1b, 0x54, 0x1f, 0x42, 0x56, 0x5d, 0xfb, 0x47, 0x2d, 0x78, 0x24, 0x27, 0x06, 0x23, + 0x6d, 0xee, 0x2e, 0xb3, 0xbf, 0x10, 0xcb, 0x56, 0x35, 0xc7, 0xad, 0x32, 0xb0, 0x80, 0xa2, 0x4f, + 0x00, 0x70, 0xab, 0x0a, 0xfa, 0xe4, 0xee, 0x15, 0xac, 0xce, 0x88, 0xb3, 0xa5, 0x85, 0x4c, 0x92, + 0xf5, 0xb1, 0x46, 0xcb, 0xfe, 0xa9, 0x22, 0x0c, 0xf2, 0x04, 0xe9, 0xab, 0x30, 0xbc, 0xc3, 0x33, + 0x52, 0xf4, 0x93, 0xfc, 0x22, 0x16, 0x86, 0xf0, 0x02, 0x2c, 0x2b, 0xa3, 0x35, 0x98, 0xe1, 0x19, + 0x3d, 0x9a, 0x65, 0xd2, 0x74, 0xf6, 0xa5, 0xe4, 0x8e, 0x67, 0xc3, 0x54, 0x12, 0xcc, 0x4a, 0x1a, + 0x05, 0x67, 0xd5, 0x43, 0xaf, 0xc2, 0x04, 0x7d, 0x49, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb9, 0x3c, + 0xd4, 0xd3, 0x6d, 0xc3, 0x80, 0xe2, 0x04, 0x36, 0x7d, 0xcc, 0xb7, 0x53, 0x32, 0xca, 0xc1, 0xf8, + 0x31, 0x6f, 0xca, 0x25, 0x4d, 0x5c, 0x66, 0x6e, 0xda, 0x61, 0xc6, 0xb5, 0x1b, 0x3b, 0x01, 0x09, + 0x77, 0xfc, 0x66, 0x83, 0x31, 0x7d, 0x83, 0x9a, 0xb9, 0x69, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, + 0x2d, 0xc7, 0x6d, 0x76, 0x02, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x26, 0xe0, 0x38, 0x55, 0x83, + 0xae, 0xa3, 0xd3, 0xd5, 0xc0, 0xa7, 0x07, 0xa9, 0x0c, 0x2c, 0xa3, 0x6c, 0x88, 0x87, 0xa5, 0x0f, + 0x6c, 0x97, 0x10, 0x6c, 0xc2, 0xca, 0x92, 0x53, 0x30, 0x0c, 0x08, 0x6a, 0xc2, 0xfb, 0x55, 0x52, + 0x41, 0x57, 0x60, 0x54, 0xe4, 0x69, 0x60, 0xa6, 0xae, 0x7c, 0xea, 0x98, 0xc1, 0x43, 0x39, 0x2e, + 0xc6, 0x3a, 0x8e, 0xfd, 0x7d, 0x05, 0x98, 0xc9, 0xf0, 0x55, 0xe0, 0x47, 0xd5, 0xb6, 0x1b, 0x46, + 0x2a, 0xe3, 0x9f, 0x76, 0x54, 0xf1, 0x72, 0xac, 0x30, 0xe8, 0x7e, 0xe0, 0x87, 0x61, 0xf2, 0x00, + 0x14, 0xb6, 0xc0, 0x02, 0x7a, 0xc4, 0xdc, 0x79, 0x17, 0x60, 0xa0, 0x13, 0x12, 0x19, 0x3c, 0x51, + 0x5d, 0x0d, 0x4c, 0x0f, 0xc6, 0x20, 0x94, 0x55, 0xdf, 0x56, 0x2a, 0x25, 0x8d, 0x55, 0xe7, 0x4a, + 0x25, 0x0e, 0xa3, 0x9d, 0x8b, 0x88, 0xe7, 0x78, 0x91, 0x60, 0xe8, 0xe3, 0x28, 0x60, 0xac, 0x14, + 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xcd, 0xf5, 0x5e, 0xa2, 0x5d, 0x6f, 0xf9, 0x9e, 0x1b, 0xf9, + 0xca, 0x92, 0x84, 0x47, 0xfe, 0x22, 0xed, 0x9d, 0x35, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x08, 0x83, + 0x4c, 0x8a, 0x96, 0xca, 0x7d, 0xb8, 0x54, 0xe6, 0xa1, 0x60, 0x38, 0xb8, 0xef, 0xbc, 0xb2, 0x4f, + 0xd0, 0x5b, 0xd2, 0x6f, 0x26, 0x0f, 0x2d, 0xda, 0x5d, 0xdf, 0x6f, 0x62, 0x06, 0x44, 0x1f, 0x10, + 0xe3, 0x95, 0x30, 0x9d, 0xc0, 0x4e, 0xc3, 0x0f, 0xb5, 0x41, 0x7b, 0x0a, 0x86, 0x77, 0xc9, 0x7e, + 0xe0, 0x7a, 0xdb, 0x49, 0x93, 0x9a, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x99, 0xc6, 0x6a, 0xf8, 0xb8, + 0x13, 0xc2, 0x8e, 0xf4, 0xbc, 0x02, 0x7f, 0xa0, 0x08, 0x93, 0x78, 0xa9, 0xfc, 0xde, 0x44, 0xdc, + 0x4a, 0x4f, 0xc4, 0x71, 0x27, 0x84, 0xed, 0x3d, 0x1b, 0x3f, 0x6f, 0xc1, 0x24, 0xcb, 0x16, 0x21, + 0x62, 0x46, 0xb9, 0xbe, 0x77, 0x02, 0xec, 0xe6, 0x13, 0x30, 0x18, 0xd0, 0x46, 0x93, 0x49, 0x0f, + 0x59, 0x4f, 0x30, 0x87, 0xa1, 
0xc7, 0x60, 0x80, 0x75, 0x81, 0x4e, 0xde, 0x18, 0xcf, 0x17, 0x55, + 0x76, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x08, 0x05, 0x93, 0x76, 0xd3, 0xe5, 0x9d, 0x8e, 0x75, 0x9c, + 0xef, 0x0e, 0xbf, 0xe6, 0xcc, 0xae, 0xbd, 0xb3, 0x40, 0x28, 0xd9, 0x24, 0xbb, 0x3f, 0xe5, 0xfe, + 0xa8, 0x00, 0xe7, 0x33, 0xeb, 0xf5, 0x1d, 0x08, 0xa5, 0x7b, 0xed, 0x87, 0x99, 0x0f, 0xa0, 0x78, + 0x82, 0x06, 0x8b, 0x03, 0xfd, 0x72, 0x98, 0x83, 0x7d, 0xc4, 0x27, 0xc9, 0x1c, 0xb2, 0x77, 0x49, + 0x7c, 0x92, 0xcc, 0xbe, 0xe5, 0x3c, 0x45, 0xff, 0xa2, 0x90, 0xf3, 0x2d, 0xec, 0x51, 0x7a, 0x89, + 0x9e, 0x33, 0x0c, 0x18, 0xca, 0x87, 0x1e, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xb4, 0x08, 0x93, + 0x2d, 0xd7, 0xa3, 0x87, 0xcf, 0xbe, 0xc9, 0xf8, 0xa9, 0xf0, 0x51, 0x6b, 0x26, 0x18, 0x27, 0xf1, + 0x91, 0xab, 0xc5, 0x2e, 0x29, 0xe4, 0xa7, 0x11, 0xcf, 0xed, 0xed, 0x82, 0xa9, 0xff, 0x55, 0xa3, + 0x98, 0x11, 0xc7, 0x64, 0x4d, 0x93, 0x45, 0x14, 0xfb, 0x97, 0x45, 0x8c, 0x65, 0xcb, 0x21, 0xe6, + 0x5e, 0x81, 0xf1, 0x07, 0x16, 0x3e, 0xdb, 0x5f, 0x2d, 0xc2, 0xa3, 0x5d, 0xb6, 0x3d, 0x3f, 0xeb, + 0x8d, 0x39, 0xd0, 0xce, 0xfa, 0xd4, 0x3c, 0x54, 0xe1, 0xd4, 0x56, 0xa7, 0xd9, 0xdc, 0x67, 0x3e, + 0x01, 0xa4, 0x21, 0x31, 0x04, 0x4f, 0x29, 0x1f, 0xe0, 0xa7, 0x56, 0x33, 0x70, 0x70, 0x66, 0x4d, + 0xca, 0xd0, 0xd3, 0x9b, 0x64, 0x5f, 0x91, 0x4a, 0x30, 0xf4, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, + 0x85, 0x69, 0x67, 0xcf, 0x71, 0x79, 0x00, 0x58, 0x49, 0x80, 0x73, 0xf4, 0x4a, 0x66, 0xb8, 0x98, + 0x44, 0xc0, 0xe9, 0x3a, 0xe8, 0x35, 0x40, 0xfe, 0x26, 0xb3, 0xf6, 0x6d, 0x5c, 0x25, 0x9e, 0x50, + 0xd3, 0xb1, 0xb9, 0x2b, 0xc6, 0x47, 0xc2, 0xcd, 0x14, 0x06, 0xce, 0xa8, 0x95, 0x88, 0x05, 0x32, + 0x94, 0x1f, 0x0b, 0xa4, 0xfb, 0xb9, 0xd8, 0x33, 0x15, 0xc5, 0x7f, 0xb1, 0xe8, 0xf5, 0xc5, 0x99, + 0x7c, 0x33, 0xa4, 0xdd, 0x2b, 0xcc, 0xcc, 0x8e, 0xcb, 0x13, 0xb5, 0x08, 0x16, 0xa7, 0x35, 0x33, + 0xbb, 0x18, 0x88, 0x4d, 0x5c, 0xbe, 0x20, 0xc2, 0xd8, 0x71, 0xd2, 0x60, 0xf1, 0x45, 0xdc, 0x1d, + 0x85, 0x81, 0x3e, 0x09, 0xc3, 0x0d, 0x77, 0xcf, 0x0d, 0x85, 0x34, 0xe5, 0xc8, 0xaa, 0x8b, 0xf8, + 0x1c, 0x2c, 0x73, 0x32, 0x58, 0xd2, 0xb3, 0x7f, 0xa0, 0x00, 0xe3, 0xb2, 0xc5, 0xd7, 0x3b, 0x7e, + 0xe4, 0x9c, 0xc0, 0xb5, 0x7c, 0xd5, 0xb8, 0x96, 0x3f, 0xd0, 0x2d, 0xf8, 0x10, 0xeb, 0x52, 0xee, + 0x75, 0x7c, 0x33, 0x71, 0x1d, 0x3f, 0xd9, 0x9b, 0x54, 0xf7, 0x6b, 0xf8, 0x9f, 0x5a, 0x30, 0x6d, + 0xe0, 0x9f, 0xc0, 0x6d, 0xb0, 0x6a, 0xde, 0x06, 0x8f, 0xf7, 0xfc, 0x86, 0x9c, 0x5b, 0xe0, 0xbb, + 0x8b, 0x89, 0xbe, 0xb3, 0xd3, 0xff, 0x2d, 0x18, 0xd8, 0x71, 0x82, 0x46, 0xb7, 0x60, 0xeb, 0xa9, + 0x4a, 0x0b, 0xd7, 0x9c, 0x40, 0xe8, 0x29, 0x9f, 0x51, 0x59, 0xbc, 0x9d, 0xa0, 0xb7, 0x8e, 0x92, + 0x35, 0x85, 0x5e, 0x82, 0xa1, 0xb0, 0xee, 0xb7, 0x95, 0x15, 0xff, 0x05, 0x9e, 0xe1, 0x9b, 0x96, + 0x1c, 0x1e, 0xcc, 0x23, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0xfa, 0x14, 0x8c, 0xb3, 0x5f, 0xca, + 0x68, 0xa8, 0x98, 0x9f, 0x98, 0xa9, 0xa6, 0x23, 0x72, 0x8b, 0x3a, 0xa3, 0x08, 0x9b, 0xa4, 0xe6, + 0xb6, 0xa1, 0xa4, 0x3e, 0xeb, 0xa1, 0xea, 0x06, 0xff, 0x63, 0x11, 0x66, 0x32, 0xd6, 0x1c, 0x0a, + 0x8d, 0x99, 0xb8, 0xd2, 0xe7, 0x52, 0x7d, 0x87, 0x73, 0x11, 0xb2, 0xd7, 0x50, 0x43, 0xac, 0xad, + 0xbe, 0x1b, 0xbd, 0x15, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x77, 0xa3, 0xb4, 0xb1, 0x13, 0x1b, 0x6a, + 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0x2d, 0xc2, 0xa9, 0xac, 0x78, 0x68, 0xe8, 0x73, + 0x89, 0x54, 0x7f, 0x2f, 0xf4, 0x1b, 0x49, 0x8d, 0xe7, 0xff, 0xe3, 0x32, 0xe0, 0xa5, 0x05, 0x33, + 0xf9, 0x5f, 0xcf, 0x61, 0x16, 0x6d, 0xb2, 0xa0, 0x00, 0x01, 0x4f, 0xd1, 0x28, 0x8f, 0x8f, 0x0f, + 0xf7, 0xdd, 0x01, 0x91, 0xdb, 0x31, 0x4c, 0x18, 0x24, 
0xc8, 0xe2, 0xde, 0x06, 0x09, 0xb2, 0xe5, + 0x39, 0x17, 0x46, 0xb5, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4b, 0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, + 0xd6, 0x7f, 0xd4, 0x82, 0x84, 0x8d, 0xba, 0x12, 0x8b, 0x59, 0xb9, 0x62, 0xb1, 0x0b, 0x30, 0x10, + 0xf8, 0x4d, 0x92, 0xcc, 0x89, 0x87, 0xfd, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xa2, 0x58, 0xd8, 0x31, + 0xa6, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x09, 0x18, 0x6c, 0x92, 0x3d, 0xd2, 0x4c, 0xa6, 0x2e, 0xb9, + 0x41, 0x0b, 0x31, 0x87, 0xd9, 0x3f, 0x3f, 0x00, 0xe7, 0xba, 0x86, 0xd5, 0xa0, 0xcf, 0xa1, 0x6d, + 0x27, 0x22, 0x77, 0x9d, 0xfd, 0x64, 0x8e, 0x81, 0xab, 0xbc, 0x18, 0x4b, 0x38, 0xf3, 0x22, 0xe2, + 0xa1, 0x82, 0x13, 0x42, 0x44, 0x11, 0x21, 0x58, 0x40, 0x4d, 0xa1, 0x54, 0xf1, 0x38, 0x84, 0x52, + 0xcf, 0x01, 0x84, 0x61, 0x93, 0x5b, 0xf2, 0x34, 0x84, 0x7b, 0x52, 0x1c, 0x52, 0xba, 0x76, 0x43, + 0x40, 0xb0, 0x86, 0x85, 0xca, 0x30, 0xd5, 0x0e, 0xfc, 0x88, 0xcb, 0x64, 0xcb, 0xdc, 0xd8, 0x6d, + 0xd0, 0x8c, 0x68, 0x50, 0x4d, 0xc0, 0x71, 0xaa, 0x06, 0x7a, 0x11, 0x46, 0x45, 0x94, 0x83, 0xaa, + 0xef, 0x37, 0x85, 0x18, 0x48, 0xd9, 0x7f, 0xd5, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0xd0, + 0x3b, 0x9c, 0x59, 0x8d, 0x0b, 0x7b, 0x35, 0xbc, 0x44, 0x6c, 0xc4, 0x91, 0xbe, 0x62, 0x23, 0xc6, + 0x82, 0xb1, 0x52, 0xdf, 0xba, 0x2d, 0xe8, 0x29, 0x4a, 0xfa, 0x99, 0x01, 0x98, 0x11, 0x0b, 0xe7, + 0x61, 0x2f, 0x97, 0x5b, 0xe9, 0xe5, 0x72, 0x1c, 0xa2, 0xb3, 0xf7, 0xd6, 0xcc, 0x49, 0xaf, 0x99, + 0x1f, 0xb4, 0xc0, 0x64, 0xaf, 0xd0, 0x5f, 0xca, 0x4d, 0xd2, 0xf2, 0x62, 0x2e, 0xbb, 0xd6, 0x90, + 0x17, 0xc8, 0x3b, 0x4c, 0xd7, 0x62, 0xff, 0x67, 0x0b, 0x1e, 0xef, 0x49, 0x11, 0xad, 0x40, 0x89, + 0xf1, 0x80, 0xda, 0xeb, 0xec, 0x49, 0x65, 0x0c, 0x2b, 0x01, 0x39, 0x2c, 0x69, 0x5c, 0x13, 0xad, + 0xa4, 0xb2, 0xe1, 0x3c, 0x95, 0x91, 0x0d, 0xe7, 0xb4, 0x31, 0x3c, 0x0f, 0x98, 0x0e, 0xe7, 0xfb, + 0xe9, 0x8d, 0x63, 0x38, 0xa2, 0xa0, 0x0f, 0x1b, 0x62, 0x3f, 0x3b, 0x21, 0xf6, 0x43, 0x26, 0xb6, + 0x76, 0x87, 0x7c, 0x1c, 0xa6, 0x58, 0xf8, 0x23, 0x66, 0x9a, 0x2d, 0x5c, 0x64, 0x0a, 0xb1, 0xf9, + 0xe5, 0x8d, 0x04, 0x0c, 0xa7, 0xb0, 0xed, 0x3f, 0x2c, 0xc2, 0x10, 0xdf, 0x7e, 0x27, 0xf0, 0x26, + 0x7c, 0x1a, 0x4a, 0x6e, 0xab, 0xd5, 0xe1, 0x09, 0x4e, 0x06, 0xb9, 0x5f, 0x2c, 0x9d, 0xa7, 0x8a, + 0x2c, 0xc4, 0x31, 0x1c, 0xad, 0x0a, 0x89, 0x73, 0x97, 0x08, 0x8b, 0xbc, 0xe3, 0x0b, 0x65, 0x27, + 0x72, 0x38, 0x83, 0xa3, 0xee, 0xd9, 0x58, 0x36, 0x8d, 0x3e, 0x03, 0x10, 0x46, 0x81, 0xeb, 0x6d, + 0xd3, 0x32, 0x11, 0x50, 0xf4, 0x83, 0x5d, 0xa8, 0xd5, 0x14, 0x32, 0xa7, 0x19, 0x9f, 0x39, 0x0a, + 0x80, 0x35, 0x8a, 0x68, 0xc1, 0xb8, 0xe9, 0xe7, 0x12, 0x73, 0x07, 0x9c, 0x6a, 0x3c, 0x67, 0x73, + 0x1f, 0x81, 0x92, 0x22, 0xde, 0x4b, 0xfe, 0x34, 0xa6, 0xb3, 0x45, 0x1f, 0x83, 0xc9, 0x44, 0xdf, + 0x8e, 0x24, 0xbe, 0xfa, 0x05, 0x0b, 0x26, 0x79, 0x67, 0x56, 0xbc, 0x3d, 0x71, 0x1b, 0xbc, 0x0d, + 0xa7, 0x9a, 0x19, 0xa7, 0xb2, 0x98, 0xfe, 0xfe, 0x4f, 0x71, 0x25, 0xae, 0xca, 0x82, 0xe2, 0xcc, + 0x36, 0xd0, 0x25, 0xba, 0xe3, 0xe8, 0xa9, 0xeb, 0x34, 0x85, 0x9b, 0xec, 0x18, 0xdf, 0x6d, 0xbc, + 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0x5f, 0x27, 0xfb, 0xea, 0x6c, 0xfa, 0x7a, + 0xf6, 0x5d, 0xa4, 0xd6, 0x2a, 0xe4, 0xa4, 0xd6, 0xd2, 0x3f, 0xad, 0xd8, 0xf5, 0xd3, 0xbe, 0x6c, + 0x81, 0x58, 0x21, 0x27, 0x20, 0x84, 0xf8, 0x56, 0x53, 0x08, 0x31, 0x97, 0xbf, 0x09, 0x72, 0xa4, + 0x0f, 0x7f, 0x6e, 0xc1, 0x14, 0x47, 0x88, 0xb5, 0xe5, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0x01, 0xef, + 0x75, 0xb2, 0xbf, 0xe1, 0x57, 0x9d, 0x68, 0x27, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, + 0x0d, 0xb9, 0x81, 0x8e, 0x90, 0xd5, 0xfb, 0xc8, 0x99, 0x27, 0xec, 0xaf, 0x59, 
0x80, 0x78, 0x33, + 0x06, 0xe3, 0x46, 0xd9, 0x21, 0x56, 0xaa, 0x5d, 0x74, 0xf1, 0xd1, 0xa4, 0x20, 0x58, 0xc3, 0x3a, + 0x96, 0xe1, 0x49, 0x98, 0x3c, 0x14, 0x7b, 0x9b, 0x3c, 0x1c, 0x61, 0x44, 0xff, 0xed, 0x10, 0x24, + 0x9d, 0x71, 0xd0, 0x6d, 0x18, 0xab, 0x3b, 0x6d, 0x67, 0xd3, 0x6d, 0xba, 0x91, 0x4b, 0xc2, 0x6e, + 0xb6, 0x52, 0xcb, 0x1a, 0x9e, 0x50, 0x52, 0x6b, 0x25, 0xd8, 0xa0, 0x83, 0x16, 0x00, 0xda, 0x81, + 0xbb, 0xe7, 0x36, 0xc9, 0x36, 0x93, 0x95, 0x30, 0xc7, 0x7c, 0x6e, 0x00, 0x24, 0x4b, 0xb1, 0x86, + 0x91, 0xe1, 0xf9, 0x5c, 0x7c, 0xc8, 0x9e, 0xcf, 0x70, 0x62, 0x9e, 0xcf, 0x03, 0x47, 0xf2, 0x7c, + 0x1e, 0x39, 0xb2, 0xe7, 0xf3, 0x60, 0x5f, 0x9e, 0xcf, 0x18, 0xce, 0x48, 0xde, 0x93, 0xfe, 0x5f, + 0x75, 0x9b, 0x44, 0x3c, 0x38, 0x78, 0x34, 0x81, 0xb9, 0xfb, 0x07, 0xf3, 0x67, 0x70, 0x26, 0x06, + 0xce, 0xa9, 0x89, 0x3e, 0x01, 0xb3, 0x4e, 0xb3, 0xe9, 0xdf, 0x55, 0x93, 0xba, 0x12, 0xd6, 0x9d, + 0x26, 0x57, 0x42, 0x0c, 0x33, 0xaa, 0x8f, 0xdd, 0x3f, 0x98, 0x9f, 0x5d, 0xcc, 0xc1, 0xc1, 0xb9, + 0xb5, 0xd1, 0x47, 0xa1, 0xd4, 0x0e, 0xfc, 0xfa, 0x9a, 0xe6, 0x31, 0x78, 0x9e, 0x0e, 0x60, 0x55, + 0x16, 0x1e, 0x1e, 0xcc, 0x8f, 0xab, 0x3f, 0xec, 0xc2, 0x8f, 0x2b, 0x64, 0xb8, 0x32, 0x8f, 0x1e, + 0xab, 0x2b, 0xf3, 0x2e, 0xcc, 0xd4, 0x48, 0xe0, 0xb2, 0x1c, 0xe0, 0x8d, 0xf8, 0x7c, 0xda, 0x80, + 0x52, 0x90, 0x38, 0x91, 0xfb, 0x8a, 0x7a, 0xa8, 0xa5, 0x00, 0x90, 0x27, 0x70, 0x4c, 0xc8, 0xfe, + 0x3f, 0x16, 0x0c, 0x0b, 0xe7, 0x9b, 0x13, 0xe0, 0x1a, 0x17, 0x0d, 0x4d, 0xc2, 0x7c, 0xf6, 0x80, + 0xb1, 0xce, 0xe4, 0xea, 0x10, 0x2a, 0x09, 0x1d, 0xc2, 0xe3, 0xdd, 0x88, 0x74, 0xd7, 0x1e, 0xfc, + 0xcd, 0x22, 0xe5, 0xde, 0x0d, 0x37, 0xd0, 0x87, 0x3f, 0x04, 0xeb, 0x30, 0x1c, 0x0a, 0x37, 0xc4, + 0x42, 0xbe, 0xdd, 0x7c, 0x72, 0x12, 0x63, 0x3b, 0x36, 0xe1, 0x78, 0x28, 0x89, 0x64, 0xfa, 0x37, + 0x16, 0x1f, 0xa2, 0x7f, 0x63, 0x2f, 0x47, 0xd9, 0x81, 0xe3, 0x70, 0x94, 0xb5, 0xbf, 0xc2, 0x6e, + 0x4e, 0xbd, 0xfc, 0x04, 0x98, 0xaa, 0xab, 0xe6, 0x1d, 0x6b, 0x77, 0x59, 0x59, 0xa2, 0x53, 0x39, + 0xcc, 0xd5, 0xcf, 0x59, 0x70, 0x2e, 0xe3, 0xab, 0x34, 0x4e, 0xeb, 0x19, 0x18, 0x71, 0x3a, 0x0d, + 0x57, 0xed, 0x65, 0x4d, 0x9f, 0xb8, 0x28, 0xca, 0xb1, 0xc2, 0x40, 0xcb, 0x30, 0x4d, 0xee, 0xb5, + 0x5d, 0xae, 0x4a, 0xd5, 0x8d, 0x4d, 0x8b, 0xdc, 0x63, 0x6b, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0x15, + 0x9c, 0xa4, 0x98, 0x1b, 0x9c, 0xe4, 0x1f, 0x58, 0x30, 0xaa, 0x1c, 0xf1, 0x1e, 0xfa, 0x68, 0x7f, + 0xdc, 0x1c, 0xed, 0x47, 0xbb, 0x8c, 0x76, 0xce, 0x30, 0xff, 0x56, 0x41, 0xf5, 0xb7, 0xea, 0x07, + 0x51, 0x1f, 0x1c, 0xdc, 0x83, 0x9b, 0xc7, 0x5f, 0x81, 0x51, 0xa7, 0xdd, 0x96, 0x00, 0x69, 0x83, + 0xc6, 0x62, 0xd8, 0xc6, 0xc5, 0x58, 0xc7, 0x51, 0xd6, 0xfa, 0xc5, 0x5c, 0x6b, 0xfd, 0x06, 0x40, + 0xe4, 0x04, 0xdb, 0x24, 0xa2, 0x65, 0x22, 0x90, 0x58, 0xfe, 0x79, 0xd3, 0x89, 0xdc, 0xe6, 0x82, + 0xeb, 0x45, 0x61, 0x14, 0x2c, 0x54, 0xbc, 0xe8, 0x66, 0xc0, 0x9f, 0x90, 0x5a, 0xa4, 0x1e, 0x45, + 0x0b, 0x6b, 0x74, 0xa5, 0xd3, 0x39, 0x6b, 0x63, 0xd0, 0x34, 0x66, 0x58, 0x17, 0xe5, 0x58, 0x61, + 0xd8, 0x1f, 0x61, 0xb7, 0x0f, 0x1b, 0xd3, 0xa3, 0x85, 0xb6, 0xf9, 0xf2, 0xa8, 0x9a, 0x0d, 0xa6, + 0xc9, 0x2c, 0xeb, 0x01, 0x74, 0xba, 0x1f, 0xf6, 0xb4, 0x61, 0xdd, 0x77, 0x2c, 0x8e, 0xb2, 0x83, + 0xbe, 0x2d, 0x65, 0xa0, 0xf2, 0x6c, 0x8f, 0x5b, 0xe3, 0x08, 0x26, 0x29, 0x2c, 0xa1, 0x05, 0x0b, + 0xf7, 0x5f, 0xa9, 0x8a, 0x7d, 0xa1, 0x25, 0xb4, 0x10, 0x00, 0x1c, 0xe3, 0x50, 0x66, 0x4a, 0xfd, + 0x09, 0x67, 0x51, 0x1c, 0xd8, 0x51, 0x61, 0x87, 0x58, 0xc3, 0x40, 0x97, 0x85, 0x40, 0x81, 0xeb, + 0x05, 0x1e, 0x4d, 0x08, 0x14, 0xe4, 0x70, 0x69, 0x52, 0xa0, 0x2b, 0x30, 0xaa, 0x72, 0xda, 0x56, + 0x79, 
0xaa, 0x54, 0xb1, 0xcc, 0x56, 0xe2, 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0c, 0xb9, 0x9c, + 0x4d, 0x45, 0xdb, 0xe5, 0xf2, 0xca, 0x0f, 0x4a, 0x2b, 0xa0, 0x9a, 0x09, 0x3e, 0x64, 0x45, 0xfc, + 0x74, 0x92, 0x8e, 0xe1, 0x49, 0x12, 0xe8, 0x55, 0x98, 0x68, 0xfa, 0x4e, 0x63, 0xc9, 0x69, 0x3a, + 0x5e, 0x9d, 0x8d, 0xcf, 0x88, 0x99, 0x1a, 0xf1, 0x86, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0xbc, 0xe9, + 0x25, 0x22, 0x42, 0xb4, 0xe3, 0x6d, 0x93, 0x50, 0x64, 0x28, 0x65, 0xcc, 0xdb, 0x8d, 0x1c, 0x1c, + 0x9c, 0x5b, 0x1b, 0xbd, 0x04, 0x63, 0xf2, 0xf3, 0xb5, 0x38, 0x0a, 0xb1, 0xe3, 0x83, 0x06, 0xc3, + 0x06, 0x26, 0xba, 0x0b, 0xa7, 0xe5, 0xff, 0x8d, 0xc0, 0xd9, 0xda, 0x72, 0xeb, 0xc2, 0xb9, 0x98, + 0x7b, 0x48, 0x2e, 0x4a, 0x37, 0xbe, 0x95, 0x2c, 0xa4, 0xc3, 0x83, 0xf9, 0x0b, 0x62, 0xd4, 0x32, + 0xe1, 0x6c, 0x12, 0xb3, 0xe9, 0xa3, 0x35, 0x98, 0xd9, 0x21, 0x4e, 0x33, 0xda, 0x59, 0xde, 0x21, + 0xf5, 0x5d, 0xb9, 0xe9, 0x58, 0x74, 0x06, 0xcd, 0x5d, 0xe0, 0x5a, 0x1a, 0x05, 0x67, 0xd5, 0x43, + 0x6f, 0xc0, 0x6c, 0xbb, 0xb3, 0xd9, 0x74, 0xc3, 0x9d, 0x75, 0x3f, 0x62, 0xa6, 0x40, 0x2a, 0x45, + 0xae, 0x08, 0xe3, 0xa0, 0xe2, 0x5f, 0x54, 0x73, 0xf0, 0x70, 0x2e, 0x05, 0xf4, 0x36, 0x9c, 0x4e, + 0x2c, 0x06, 0xe1, 0xc8, 0x3e, 0x91, 0x1f, 0x6f, 0xbf, 0x96, 0x55, 0x41, 0xc4, 0x84, 0xc8, 0x02, + 0xe1, 0xec, 0x26, 0xe8, 0xe3, 0x43, 0x0b, 0x70, 0x1a, 0xce, 0x4e, 0xc5, 0x36, 0xcb, 0x5a, 0x14, + 0xd4, 0x10, 0x1b, 0x58, 0xe8, 0x65, 0x00, 0xb7, 0xbd, 0xea, 0xb4, 0xdc, 0x26, 0x7d, 0x64, 0xce, + 0xb0, 0x3a, 0xf4, 0xc1, 0x01, 0x95, 0xaa, 0x2c, 0xa5, 0xa7, 0xba, 0xf8, 0xb7, 0x8f, 0x35, 0x6c, + 0x54, 0x85, 0x09, 0xf1, 0x6f, 0x5f, 0x2c, 0x86, 0x69, 0xe5, 0x69, 0x3e, 0x21, 0x6b, 0xa8, 0x15, + 0x80, 0xcc, 0x12, 0x36, 0xe7, 0x89, 0xfa, 0x68, 0x1b, 0xce, 0x89, 0x1c, 0xcc, 0x44, 0x5f, 0xdd, + 0x72, 0xf6, 0x42, 0x16, 0x1e, 0x7f, 0x84, 0x07, 0x90, 0x59, 0xec, 0x86, 0x88, 0xbb, 0xd3, 0x79, + 0x67, 0x16, 0x70, 0xbf, 0x6d, 0xd1, 0xda, 0x1a, 0x97, 0x8c, 0x3e, 0x0b, 0x63, 0xfa, 0x9e, 0x13, + 0x37, 0xfe, 0xc5, 0x6c, 0x26, 0x52, 0xdb, 0x9b, 0x9c, 0xc7, 0x56, 0xfb, 0x4f, 0x87, 0x61, 0x83, + 0x22, 0xaa, 0x67, 0xb8, 0x51, 0x5f, 0xee, 0x8f, 0xa3, 0xe8, 0xdf, 0x00, 0x8c, 0x40, 0xf6, 0x92, + 0x43, 0x37, 0x60, 0xa4, 0xde, 0x74, 0x89, 0x17, 0x55, 0xaa, 0xdd, 0x02, 0x9f, 0x2d, 0x0b, 0x1c, + 0xb1, 0x86, 0x45, 0xcc, 0x78, 0x5e, 0x86, 0x15, 0x05, 0xfb, 0x57, 0x0b, 0x30, 0xdf, 0x23, 0x01, + 0x41, 0x42, 0x1d, 0x64, 0xf5, 0xa5, 0x0e, 0x5a, 0x94, 0x39, 0x98, 0xd7, 0x13, 0x92, 0xa6, 0x44, + 0x7e, 0xe5, 0x58, 0xde, 0x94, 0xc4, 0xef, 0xdb, 0x3c, 0x5f, 0xd7, 0x28, 0x0d, 0xf4, 0x74, 0x30, + 0x31, 0x34, 0xc9, 0x83, 0xfd, 0x3f, 0x3f, 0x73, 0xb5, 0x82, 0xf6, 0x57, 0x0a, 0x70, 0x5a, 0x0d, + 0xe1, 0x37, 0xef, 0xc0, 0xdd, 0x4a, 0x0f, 0xdc, 0x31, 0xe8, 0x54, 0xed, 0x9b, 0x30, 0xc4, 0x23, + 0xb9, 0xf5, 0xc1, 0xf6, 0x3e, 0x61, 0x06, 0x3d, 0x55, 0x9c, 0x96, 0x11, 0xf8, 0xf4, 0x7b, 0x2d, + 0x98, 0xdc, 0x58, 0xae, 0xd6, 0xfc, 0xfa, 0x2e, 0x89, 0x16, 0xf9, 0x33, 0x05, 0x6b, 0x0e, 0xa7, + 0x0f, 0xc2, 0x9a, 0x66, 0x31, 0xbd, 0x17, 0x60, 0x60, 0xc7, 0x0f, 0xa3, 0xa4, 0xc1, 0xc5, 0x35, + 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xbf, 0x6b, 0xc1, 0xe0, 0x86, 0xe3, 0x7a, 0x91, 0x14, 0xce, 0x5b, + 0x39, 0xc2, 0xf9, 0x7e, 0xbe, 0x0b, 0xbd, 0x08, 0x43, 0x64, 0x6b, 0x8b, 0xd4, 0x23, 0x31, 0xab, + 0xd2, 0x5b, 0x7f, 0x68, 0x85, 0x95, 0x52, 0x3e, 0x8c, 0x35, 0xc6, 0xff, 0x62, 0x81, 0x8c, 0xee, + 0x40, 0x29, 0x72, 0x5b, 0x64, 0xb1, 0xd1, 0x10, 0x2a, 0xeb, 0x07, 0x88, 0x38, 0xb0, 0x21, 0x09, + 0xe0, 0x98, 0x96, 0xfd, 0xc5, 0x02, 0x40, 0x1c, 0x02, 0xa7, 0xd7, 0x27, 0x2e, 0xa5, 0x94, 0x99, + 0x17, 0x33, 0x94, 0x99, 0x28, 
0x26, 0x98, 0xa1, 0xc9, 0x54, 0xc3, 0x54, 0xec, 0x6b, 0x98, 0x06, + 0x8e, 0x32, 0x4c, 0xcb, 0x30, 0x1d, 0x87, 0xf0, 0x31, 0x23, 0x98, 0xb1, 0xa7, 0xe9, 0x46, 0x12, + 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x0b, 0x2a, 0x92, 0x89, 0xb8, 0xd1, 0x98, 0x45, 0xb4, 0xae, 0x1c, + 0xee, 0x31, 0x4e, 0xb1, 0xb6, 0xb6, 0x90, 0xab, 0xad, 0xfd, 0x09, 0x0b, 0x4e, 0x25, 0xdb, 0x61, + 0x2e, 0xaa, 0x5f, 0xb0, 0xe0, 0x34, 0xd3, 0x59, 0xb3, 0x56, 0xd3, 0x1a, 0xf2, 0x17, 0xba, 0x46, + 0x67, 0xc9, 0xe9, 0x71, 0x1c, 0x16, 0x62, 0x2d, 0x8b, 0x34, 0xce, 0x6e, 0xd1, 0xfe, 0x4f, 0x05, + 0x98, 0xcd, 0x0b, 0xeb, 0xc2, 0x1c, 0x26, 0x9c, 0x7b, 0xb5, 0x5d, 0x72, 0x57, 0x98, 0xa5, 0xc7, + 0x0e, 0x13, 0xbc, 0x18, 0x4b, 0x78, 0x32, 0xa6, 0x7c, 0xa1, 0xbf, 0x98, 0xf2, 0x68, 0x07, 0xa6, + 0xef, 0xee, 0x10, 0xef, 0x96, 0x17, 0x3a, 0x91, 0x1b, 0x6e, 0xb9, 0x4c, 0xbf, 0xcb, 0xd7, 0xcd, + 0xcb, 0xd2, 0x78, 0xfc, 0x4e, 0x12, 0xe1, 0xf0, 0x60, 0xfe, 0x9c, 0x51, 0x10, 0x77, 0x99, 0x1f, + 0x24, 0x38, 0x4d, 0x34, 0x1d, 0x92, 0x7f, 0xe0, 0x21, 0x86, 0xe4, 0xb7, 0xbf, 0x60, 0xc1, 0xd9, + 0xdc, 0x3c, 0xa0, 0xe8, 0x12, 0x8c, 0x38, 0x6d, 0x97, 0x8b, 0xc8, 0xc5, 0x31, 0xca, 0x44, 0x31, + 0xd5, 0x0a, 0x17, 0x90, 0x2b, 0xa8, 0xca, 0x4f, 0x5e, 0xc8, 0xcd, 0x4f, 0xde, 0x33, 0xdd, 0xb8, + 0xfd, 0x3d, 0x16, 0x08, 0x67, 0xcf, 0x3e, 0xce, 0xee, 0x4f, 0xc1, 0xd8, 0x5e, 0x3a, 0x6d, 0xcf, + 0x85, 0x7c, 0xef, 0x57, 0x91, 0xac, 0x47, 0x31, 0x64, 0x46, 0x8a, 0x1e, 0x83, 0x96, 0xdd, 0x00, + 0x01, 0x2d, 0x13, 0x26, 0x00, 0xee, 0xdd, 0x9b, 0xe7, 0x00, 0x1a, 0x0c, 0x57, 0x4b, 0xf2, 0xae, + 0x6e, 0xe6, 0xb2, 0x82, 0x60, 0x0d, 0xcb, 0xfe, 0xf7, 0x05, 0x18, 0x95, 0x69, 0x62, 0x3a, 0x5e, + 0x3f, 0x62, 0x9a, 0x23, 0xe5, 0x8d, 0xa4, 0xaf, 0x78, 0x26, 0x47, 0xac, 0xc6, 0xd2, 0x2d, 0xf5, + 0x8a, 0x5f, 0x93, 0x00, 0x1c, 0xe3, 0xd0, 0x5d, 0x14, 0x76, 0x36, 0x19, 0x7a, 0xc2, 0x35, 0xb1, + 0xc6, 0x8b, 0xb1, 0x84, 0xa3, 0x4f, 0xc0, 0x14, 0xaf, 0x17, 0xf8, 0x6d, 0x67, 0x9b, 0xeb, 0x1e, + 0x06, 0x55, 0x4c, 0x81, 0xa9, 0xb5, 0x04, 0xec, 0xf0, 0x60, 0xfe, 0x54, 0xb2, 0x8c, 0x29, 0xd5, + 0x52, 0x54, 0x98, 0x89, 0x11, 0x6f, 0x84, 0xee, 0xfe, 0x94, 0x65, 0x52, 0x0c, 0xc2, 0x3a, 0x9e, + 0xfd, 0x59, 0x40, 0xe9, 0x84, 0x39, 0xe8, 0x35, 0x6e, 0x57, 0xea, 0x06, 0xa4, 0xd1, 0x4d, 0xc9, + 0xa6, 0x7b, 0xce, 0x4b, 0xaf, 0x22, 0x5e, 0x0b, 0xab, 0xfa, 0xf6, 0x5f, 0x2d, 0xc2, 0x54, 0xd2, + 0x8f, 0x1a, 0x5d, 0x83, 0x21, 0xce, 0x7a, 0x08, 0xf2, 0x5d, 0x6c, 0x38, 0x34, 0xef, 0x6b, 0x76, + 0x08, 0x0b, 0xee, 0x45, 0xd4, 0x47, 0x6f, 0xc0, 0x68, 0xc3, 0xbf, 0xeb, 0xdd, 0x75, 0x82, 0xc6, + 0x62, 0xb5, 0x22, 0x96, 0x73, 0xe6, 0xa3, 0xb2, 0x1c, 0xa3, 0xe9, 0x1e, 0xdd, 0x4c, 0x5f, 0x19, + 0x83, 0xb0, 0x4e, 0x0e, 0x6d, 0xb0, 0xf8, 0xde, 0x5b, 0xee, 0xf6, 0x9a, 0xd3, 0xee, 0xe6, 0x64, + 0xb0, 0x2c, 0x91, 0x34, 0xca, 0xe3, 0x22, 0x08, 0x38, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x1c, 0xcc, + 0x84, 0x39, 0xa2, 0xee, 0xbc, 0xfc, 0x69, 0xdd, 0xa4, 0xbf, 0x4b, 0x8f, 0xd0, 0xe7, 0x7e, 0x96, + 0x50, 0x3c, 0xab, 0x19, 0xfb, 0x47, 0x4e, 0x81, 0xb1, 0x89, 0x8d, 0x74, 0x9a, 0xd6, 0x31, 0xa5, + 0xd3, 0xc4, 0x30, 0x42, 0x5a, 0xed, 0x68, 0xbf, 0xec, 0x06, 0xdd, 0x92, 0x4c, 0xaf, 0x08, 0x9c, + 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x76, 0xce, 0xd3, 0xe2, 0xd7, 0x31, 0xe7, 0xe9, 0xc0, 0x09, + 0xe6, 0x3c, 0x5d, 0x87, 0xe1, 0x6d, 0x37, 0xc2, 0xa4, 0xed, 0x0b, 0xa6, 0x3f, 0x73, 0x1d, 0x5e, + 0xe5, 0x28, 0xe9, 0xec, 0x7a, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x9a, 0xda, 0x81, 0x43, 0xf9, 0x0f, + 0xf3, 0xb4, 0xb1, 0x41, 0xe6, 0x1e, 0x14, 0x99, 0x4d, 0x87, 0x1f, 0x34, 0xb3, 0xe9, 0xaa, 0xcc, + 0x47, 0x3a, 0x92, 0xef, 0x11, 0xc4, 0xd2, 0x8d, 0xf6, 
0xc8, 0x42, 0x7a, 0x5b, 0xcf, 0xe1, 0x5a, + 0xca, 0x3f, 0x09, 0x54, 0x7a, 0xd6, 0x3e, 0x33, 0xb7, 0x7e, 0x8f, 0x05, 0xa7, 0xdb, 0x59, 0xe9, + 0x8c, 0x85, 0x5e, 0xfe, 0xc5, 0xbe, 0x33, 0x26, 0x1b, 0x0d, 0x32, 0x79, 0x56, 0x26, 0x1a, 0xce, + 0x6e, 0x8e, 0x0e, 0x74, 0xb0, 0xd9, 0x10, 0xfa, 0xe1, 0x27, 0x72, 0x52, 0xc0, 0x76, 0x49, 0xfc, + 0xba, 0x91, 0x91, 0x6e, 0xf4, 0xfd, 0x79, 0xe9, 0x46, 0xfb, 0x4e, 0x32, 0xfa, 0x9a, 0x4a, 0xfe, + 0x3a, 0x9e, 0xbf, 0x94, 0x78, 0x6a, 0xd7, 0x9e, 0x29, 0x5f, 0x5f, 0x53, 0x29, 0x5f, 0xbb, 0x04, + 0x6f, 0xe5, 0x09, 0x5d, 0x7b, 0x26, 0x7a, 0xd5, 0x92, 0xb5, 0x4e, 0x1e, 0x4f, 0xb2, 0x56, 0xe3, + 0xaa, 0xe1, 0xf9, 0x42, 0x9f, 0xee, 0x71, 0xd5, 0x18, 0x74, 0xbb, 0x5f, 0x36, 0x3c, 0x31, 0xed, + 0xf4, 0x03, 0x25, 0xa6, 0xbd, 0xad, 0x27, 0x7a, 0x45, 0x3d, 0x32, 0x99, 0x52, 0xa4, 0x3e, 0xd3, + 0xbb, 0xde, 0xd6, 0x2f, 0xc0, 0x99, 0x7c, 0xba, 0xea, 0x9e, 0x4b, 0xd3, 0xcd, 0xbc, 0x02, 0x53, + 0x69, 0x63, 0x4f, 0x9d, 0x4c, 0xda, 0xd8, 0xd3, 0xc7, 0x9e, 0x36, 0xf6, 0xcc, 0x09, 0xa4, 0x8d, + 0x7d, 0xe4, 0x04, 0xd3, 0xc6, 0xde, 0x66, 0xc6, 0x2c, 0x3c, 0x64, 0x8e, 0x08, 0x36, 0x9b, 0x1d, + 0xd8, 0x34, 0x2b, 0xae, 0x0e, 0xff, 0x38, 0x05, 0xc2, 0x31, 0xa9, 0x8c, 0x74, 0xb4, 0xb3, 0x0f, + 0x21, 0x1d, 0xed, 0x7a, 0x9c, 0x8e, 0xf6, 0x6c, 0xfe, 0x54, 0x67, 0xb8, 0x3f, 0xe4, 0x24, 0xa1, + 0xbd, 0xad, 0x27, 0x8f, 0x7d, 0xb4, 0x8b, 0xc6, 0x22, 0x4b, 0xf0, 0xd8, 0x25, 0x65, 0xec, 0xab, + 0x3c, 0x65, 0xec, 0x63, 0xf9, 0x27, 0x79, 0xf2, 0xba, 0x33, 0x12, 0xc5, 0xd2, 0x7e, 0xa9, 0xb0, + 0x86, 0x2c, 0xac, 0x6e, 0x4e, 0xbf, 0x54, 0x5c, 0xc4, 0x74, 0xbf, 0x14, 0x08, 0xc7, 0xa4, 0xec, + 0xef, 0x2b, 0xc0, 0xf9, 0xee, 0xfb, 0x2d, 0x96, 0xa6, 0x56, 0x63, 0x05, 0x6e, 0x42, 0x9a, 0xca, + 0xdf, 0x6c, 0x31, 0x56, 0xdf, 0x51, 0xda, 0xae, 0xc2, 0xb4, 0xf2, 0x9b, 0x68, 0xba, 0xf5, 0xfd, + 0xf5, 0xf8, 0xe5, 0xab, 0x7c, 0xcd, 0x6b, 0x49, 0x04, 0x9c, 0xae, 0x83, 0x16, 0x61, 0xd2, 0x28, + 0xac, 0x94, 0xc5, 0xdb, 0x4c, 0x89, 0x6f, 0x6b, 0x26, 0x18, 0x27, 0xf1, 0xed, 0x2f, 0x59, 0xf0, + 0x48, 0x4e, 0xa6, 0xb7, 0xbe, 0x83, 0x90, 0x6d, 0xc1, 0x64, 0xdb, 0xac, 0xda, 0x23, 0x6e, 0xa2, + 0x91, 0x4f, 0x4e, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0xf6, 0x9f, 0x59, 0x70, 0xae, 0xab, 0x21, + 0x20, 0xc2, 0x70, 0x66, 0xbb, 0x15, 0x3a, 0xcb, 0x01, 0x69, 0x10, 0x2f, 0x72, 0x9d, 0x66, 0xad, + 0x4d, 0xea, 0x9a, 0x3c, 0x9c, 0x59, 0xd4, 0x5d, 0x5d, 0xab, 0x2d, 0xa6, 0x31, 0x70, 0x4e, 0x4d, + 0xb4, 0x0a, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x02, 0x34, 0xa7, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x23, + 0x30, 0xae, 0x0c, 0x0c, 0xb5, 0x19, 0x67, 0x07, 0x3b, 0xd6, 0x01, 0xd8, 0xc4, 0x5b, 0xba, 0xf4, + 0xeb, 0xbf, 0x7f, 0xfe, 0x7d, 0xbf, 0xf9, 0xfb, 0xe7, 0xdf, 0xf7, 0x3b, 0xbf, 0x7f, 0xfe, 0x7d, + 0xdf, 0x71, 0xff, 0xbc, 0xf5, 0xeb, 0xf7, 0xcf, 0x5b, 0xbf, 0x79, 0xff, 0xbc, 0xf5, 0x3b, 0xf7, + 0xcf, 0x5b, 0xbf, 0x77, 0xff, 0xbc, 0xf5, 0xc5, 0x3f, 0x38, 0xff, 0xbe, 0x4f, 0x15, 0xf6, 0xae, + 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xf2, 0xaa, 0x3b, 0x4d, 0x03, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -11552,6 +11587,20 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Hostname))) @@ -15561,6 +15610,44 @@ func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -17949,6 +18036,49 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AllocateLoadBalancerNodePorts != nil { + i-- + if *m.AllocateLoadBalancerNodePorts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.IPFamilies) > 0 { + for iNdEx := len(m.IPFamilies) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPFamilies[iNdEx]) + copy(dAtA[i:], m.IPFamilies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPFamilies[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.ClusterIPs) > 0 { + for iNdEx := len(m.ClusterIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIPs[iNdEx]) + copy(dAtA[i:], m.ClusterIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIPs[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if m.IPFamilyPolicy != nil { + i -= len(*m.IPFamilyPolicy) + copy(dAtA[i:], *m.IPFamilyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamilyPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } if len(m.TopologyKeys) > 0 { for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.TopologyKeys[iNdEx]) @@ -17960,13 +18090,6 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x82 } } - if m.IPFamily != nil { - i -= len(*m.IPFamily) - copy(dAtA[i:], *m.IPFamily) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) - i-- - dAtA[i] = 0x7a - } if m.SessionAffinityConfig != nil { { size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -18099,6 +18222,20 @@ func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } { size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -21043,6 +21180,12 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -22499,6 +22642,22 @@ func (m 
*PodTemplateSpec) Size() (n int) { return n } +func (m *PortStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PortworxVolumeSource) Size() (n int) { if m == nil { return 0 @@ -23405,16 +23564,31 @@ func (m *ServiceSpec) Size() (n int) { l = m.SessionAffinityConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.IPFamily != nil { - l = len(*m.IPFamily) - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.TopologyKeys) > 0 { for _, s := range m.TopologyKeys { l = len(s) n += 2 + l + sovGenerated(uint64(l)) } } + if m.IPFamilyPolicy != nil { + l = len(*m.IPFamilyPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.ClusterIPs) > 0 { + for _, s := range m.ClusterIPs { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.IPFamilies) > 0 { + for _, s := range m.IPFamilies { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AllocateLoadBalancerNodePorts != nil { + n += 3 + } return n } @@ -23426,6 +23600,12 @@ func (m *ServiceStatus) Size() (n int) { _ = l l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -25156,9 +25336,15 @@ func (this *LoadBalancerIngress) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]PortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "PortStatus", "PortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") return s @@ -26257,6 +26443,18 @@ func (this *PodTemplateSpec) String() string { }, "") return s } +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *PortworxVolumeSource) String() string { if this == nil { return "nil" @@ -26979,8 +27177,11 @@ func (this *ServiceSpec) String() string { `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`, + `ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`, + `IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`, + `AllocateLoadBalancerNodePorts:` + valueToStringGenerated(this.AllocateLoadBalancerNodePorts) + `,`, `}`, }, "") return s @@ -26989,8 +27190,14 @@ func (this *ServiceStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForConditions += "}" s := strings.Join([]string{`&ServiceStatus{`, `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -27413,10 +27620,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -27574,10 +27778,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -27691,10 +27892,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -27778,10 +27976,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28015,10 +28210,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28185,10 +28377,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28322,10 +28511,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28441,10 +28627,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -28720,7 +28903,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -28881,10 +29064,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29130,7 +29310,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -29183,10 +29363,7 @@ func (m 
*CSIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29300,10 +29477,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29537,10 +29711,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29774,10 +29945,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -29947,10 +30115,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30120,10 +30285,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30193,10 +30355,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30374,10 +30533,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30494,10 +30650,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30614,10 +30767,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -30810,7 +30960,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -30938,7 +31088,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -30976,10 +31126,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if err != nil { 
return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31083,10 +31230,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31222,10 +31366,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31342,10 +31483,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31555,10 +31693,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31696,10 +31831,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -31857,10 +31989,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -32609,10 +32738,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -32713,10 +32839,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -32900,10 +33023,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33061,10 +33181,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33147,10 +33264,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33400,10 +33514,7 @@ func (m 
*ContainerStateTerminated) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33517,10 +33628,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33824,10 +33932,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33896,10 +34001,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -33983,10 +34085,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34160,10 +34259,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34267,10 +34363,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34388,10 +34481,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34574,10 +34664,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34743,10 +34830,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -34898,10 +34982,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35018,10 +35099,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35138,10 +35216,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35295,10 +35370,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35448,10 +35520,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35645,10 +35714,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -35763,10 +35829,7 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -36515,10 +36578,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -36635,10 +36695,7 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -36744,10 +36801,7 @@ func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37278,10 +37332,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37398,10 +37449,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37503,10 +37551,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37620,10 +37665,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37705,10 +37747,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -37894,10 +37933,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38177,7 +38213,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -38194,10 +38230,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38477,7 +38510,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -38494,10 +38527,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38611,10 +38641,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38767,10 +38794,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -38916,10 +38940,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39086,10 +39107,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39223,10 +39241,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39439,10 +39454,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39556,10 +39568,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39717,10 +39726,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39834,10 +39840,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -39952,10 +39955,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40313,10 +40313,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40674,10 +40671,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40811,10 +40805,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -40936,10 +40927,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41055,10 +41043,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41252,7 +41237,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41381,7 +41366,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41510,7 +41495,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41639,7 +41624,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return 
err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41768,7 +41753,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -41785,10 +41770,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41905,10 +41887,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -41992,10 +41971,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42112,10 +42088,7 @@ func (m *List) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42223,16 +42196,47 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, PortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42316,10 +42320,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42401,10 +42402,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42519,10 +42517,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42656,10 +42651,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -42808,10 +42800,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43022,10 +43011,7 @@ func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43142,10 +43128,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43227,10 +43210,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43346,10 +43326,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43498,10 +43475,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43615,10 +43589,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43738,10 +43709,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -43985,10 +43953,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44074,10 +44039,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44267,10 +44229,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy 
< 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44353,10 +44312,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44473,10 +44429,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44558,10 +44511,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44723,7 +44673,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -44740,10 +44690,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44827,10 +44774,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -44976,10 +44920,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -45097,10 +45038,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -45368,10 +45306,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -45533,7 +45468,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -45662,7 +45597,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -45981,10 +45916,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46354,10 +46286,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error 
{ if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46471,10 +46400,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46748,10 +46674,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -46900,10 +46823,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47052,10 +46972,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47299,10 +47216,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47419,10 +47333,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47707,10 +47618,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -47936,7 +47844,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -47987,10 +47895,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -48106,10 +48011,7 @@ func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -48211,10 +48113,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ 
-48331,10 +48230,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49176,10 +49072,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49341,7 +49234,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -49624,10 +49517,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49773,10 +49663,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -49890,10 +49777,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50042,10 +49926,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50163,10 +50044,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50316,10 +50194,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50437,10 +50312,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50602,10 +50474,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -50849,10 +50718,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { 
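Note: the surrounding generated.pb.go hunks are one mechanical change from the
regenerated gogo/protobuf unmarshalers: the old pair of guards on the skip
length ("if skippy < 0" followed by "if (iNdEx + skippy) < 0") is folded into a
single condition. A minimal standalone sketch of that pattern follows; the
helper name and driver are hypothetical and are not part of the vendored code.

package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// skipUnknownField mirrors the tail of a generated Unmarshal case: skippy is
// the byte count reported for an unrecognized field, iNdEx is the current read
// offset, and l is the buffer length. The combined guard rejects both a
// negative skip length and an int overflow of iNdEx+skippy in one branch,
// which is what the "+" lines in these hunks do.
func skipUnknownField(iNdEx, skippy, l int) (int, error) {
	if (skippy < 0) || (iNdEx+skippy) < 0 {
		return 0, errInvalidLength
	}
	if (iNdEx + skippy) > l {
		return 0, errors.New("unexpected EOF")
	}
	return iNdEx + skippy, nil
}

func main() {
	next, err := skipUnknownField(10, 5, 64)
	fmt.Println(next, err) // 15 <nil>
	_, err = skipUnknownField(10, -1, 64)
	fmt.Println(err) // proto: negative length found during unmarshaling
}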
@@ -51000,10 +50866,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51118,10 +50981,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51315,10 +51175,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51400,10 +51257,7 @@ func (m *PodIP) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51520,10 +51374,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51781,10 +51632,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51910,10 +51758,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -51995,10 +51840,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52080,10 +51922,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52465,10 +52304,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52554,10 +52390,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -52889,7 +52722,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -53729,7 +53562,7 @@ func (m *PodSpec) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -53835,10 +53668,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54318,10 +54148,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54437,10 +54264,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54556,10 +54380,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54676,10 +54497,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54795,10 +54613,141 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -54932,10 +54881,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55018,10 +54964,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55201,10 +55144,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55306,10 +55246,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55487,10 +55424,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55594,10 +55528,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -55827,10 +55758,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56128,10 +56056,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if 
(iNdEx + skippy) > l { @@ -56429,10 +56354,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56581,10 +56503,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56733,10 +56652,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -56947,10 +56863,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57067,10 +56980,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57250,7 +57160,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -57322,10 +57232,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57504,10 +57411,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57654,10 +57558,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57806,10 +57707,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -57926,10 +57824,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58091,7 +57986,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > 
postIndex { @@ -58176,10 +58071,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58341,7 +58233,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58470,7 +58362,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58487,10 +58379,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58652,7 +58541,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58781,7 +58670,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -58798,10 +58687,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -58979,10 +58865,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59332,10 +59215,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59685,10 +59565,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59772,10 +59649,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -59921,10 +59795,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60039,10 +59910,7 @@ func (m *SeccompProfile) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60236,7 +60104,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -60395,7 +60263,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -60433,10 +60301,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60540,10 +60405,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60679,10 +60541,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60799,10 +60658,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -60940,10 +60796,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61057,10 +60910,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61217,10 +61067,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61571,10 +61418,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61657,10 +61501,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61809,10 +61650,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -61984,10 +61822,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62104,10 +61939,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62241,10 +62073,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62361,10 +62190,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62582,10 +62408,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62667,10 +62490,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -62864,7 +62684,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -62937,13 +62757,205 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ServiceType(dAtA[iNdEx:postIndex]) + m.Type = ServiceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) } - var stringLen uint64 + m.HealthCheckNodePort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -62953,29 +62965,16 @@ func (m 
*ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -62985,29 +62984,17 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: + m.PublishNotReadyAddresses = bool(v != 0) + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -63017,27 +63004,31 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + if m.SessionAffinityConfig == nil { + m.SessionAffinityConfig = &SessionAffinityConfig{} + } + if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63065,11 +63056,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 10: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63097,11 +63088,12 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalName = 
string(dAtA[iNdEx:postIndex]) + s := IPFamilyPolicyType(dAtA[iNdEx:postIndex]) + m.IPFamilyPolicy = &s iNdEx = postIndex - case 11: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIPs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63129,86 +63121,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) - } - m.HealthCheckNodePort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HealthCheckNodePort |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PublishNotReadyAddresses = bool(v != 0) - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SessionAffinityConfig == nil { - m.SessionAffinityConfig = &SessionAffinityConfig{} - } - if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ClusterIPs = append(m.ClusterIPs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 15: + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPFamilies", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63236,14 +63153,13 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := IPFamily(dAtA[iNdEx:postIndex]) - m.IPFamily = &s + m.IPFamilies = append(m.IPFamilies, IPFamily(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocateLoadBalancerNodePorts", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -63253,34 +63169,20 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := bool(v != 0) + m.AllocateLoadBalancerNodePorts = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63357,16 +63259,47 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63452,10 +63385,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63657,10 +63587,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63862,10 +63789,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -63979,10 +63903,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64097,10 +64018,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64282,10 +64200,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64483,10 +64398,7 @@ func 
(m *Toleration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64600,10 +64512,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64687,10 +64596,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -64859,10 +64765,7 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65009,10 +64912,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65127,10 +65027,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65244,10 +65141,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65478,10 +65372,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65567,10 +65458,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -65764,10 +65652,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -66861,10 +66746,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -67042,10 +66924,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -67147,10 +67026,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -67299,10 +67175,7 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 916e2601e..14c733e9f 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.core.v1; @@ -718,7 +718,6 @@ message Container { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional Probe startupProbe = 22; @@ -817,6 +816,7 @@ message ContainerPort { // Protocol for port. Must be UDP, TCP, or SCTP. // Defaults to "TCP". // +optional + // +default="TCP" optional string protocol = 4; // What host IP to bind the external port to. @@ -1404,7 +1404,12 @@ message EphemeralVolumeSource { optional bool readOnly = 2; } -// Event is a report of an event somewhere in the cluster. +// Event is a report of an event somewhere in the cluster. Events +// have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -2032,6 +2037,12 @@ message LoadBalancerIngress { // (typically AWS load-balancers) // +optional optional string hostname = 2; + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + repeated PortStatus ports = 4; } // LoadBalancerStatus represents the status of a load-balancer. 
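Note: the generated.proto hunk directly above, together with the PortStatus
unmarshaler added earlier in generated.pb.go, introduces per-port
load-balancer status: LoadBalancerIngress gains a repeated "ports" field
(proto field 4) whose entries are the new PortStatus message (port, protocol,
optional error). A minimal consumer-side sketch, assuming only the
k8s.io/api/core/v1 types this vendor bump pulls in; the error string is an
invented illustrative value, not a defined constant.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical provider-reported error; per the field comment,
	// provider-specific values should look like "foo.example.com/CamelCase".
	errMsg := "cloud.example.com/ListenerCreateFailed"

	ing := v1.LoadBalancerIngress{
		IP: "203.0.113.10",
		Ports: []v1.PortStatus{
			{Port: 443, Protocol: v1.ProtocolTCP},                  // no Error set: port considered healthy
			{Port: 8443, Protocol: v1.ProtocolTCP, Error: &errMsg}, // provider could not program this port
		},
	}

	for _, p := range ing.Ports {
		if p.Error != nil {
			fmt.Printf("port %d/%s: %s\n", p.Port, p.Protocol, *p.Error)
			continue
		}
		fmt.Printf("port %d/%s: ok\n", p.Port, p.Protocol)
	}
}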
@@ -2677,17 +2688,13 @@ message PersistentVolumeClaimSpec { optional string volumeMode = 6; // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // * An existing custom resource that implements data population (Alpha) + // In order to use custom resource types that implement data population, + // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. // +optional optional TypedLocalObjectReference dataSource = 7; } @@ -3340,7 +3347,7 @@ message PodSecurityContext { // volume types which support fsGroup based ownership(and permissions). // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. // +optional optional string fsGroupChangePolicy = 9; @@ -3765,6 +3772,29 @@ message PodTemplateSpec { optional PodSpec spec = 2; } +message PortStatus { + // Port is the port number of the service port of which status is recorded here + optional int32 port = 1; + + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // PortworxVolumeSource represents a Portworx volume resource. message PortworxVolumeSource { // VolumeID uniquely identifies a Portworx volume @@ -3853,6 +3883,7 @@ message Probe { // Represents a projected volume source message ProjectedVolumeSource { // list of volume projections + // +optional repeated VolumeProjection sources = 1; // Mode bits used to set permissions on created files by default. @@ -4722,6 +4753,7 @@ message ServicePort { // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". // Default is TCP. 
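The PortStatus message added above constrains its error field with the kubebuilder pattern quoted in its comment (an optional DNS-1123 subdomain prefix followed by a qualified CamelCase-style name). A quick sketch that checks candidate values against that pattern; the example error names are made up for illustration, and the MaxLength=316 rule is not enforced here:

package main

import (
	"fmt"
	"regexp"
)

// portErrorRE is the pattern attached to PortStatus.error above:
// (dns1123SubdomainFmt/)?(qualifiedNameFmt).
var portErrorRE = regexp.MustCompile(`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`)

func main() {
	fmt.Println(portErrorRE.MatchString("LoadBalancerMixedProtocolNotSupported")) // true: built-in CamelCase style
	fmt.Println(portErrorRE.MatchString("cloud.example.com/QuotaExceeded"))       // true: provider-prefixed form
	fmt.Println(portErrorRE.MatchString("not a valid error value"))               // false: spaces are not allowed
}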
+ // +default="TCP" // +optional optional string protocol = 2; @@ -4750,10 +4782,14 @@ message ServicePort { // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional optional int32 nodePort = 5; @@ -4791,30 +4827,68 @@ message ServiceSpec { map selector = 2; // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. + // randomly. If an address is specified manually, is in-range (as per + // system configuration), and is not in use, it will be allocated to the + // service; otherwise creation of the service will fail. This field may not + // be changed through updates unless the type field is also being changed + // to ExternalName (which requires this field to be blank) or the type + // field is being changed from ExternalName (in which case this field may + // optionally be specified, as describe above). Valid values are "None", + // empty string (""), or a valid IP address. Setting this to "None" makes a + // "headless service" (no virtual IP), which is useful when direct endpoint + // connections are preferred and proxying is not required. Only applies to + // types ClusterIP, NodePort, and LoadBalancer. If this field is specified + // when creating a Service of type ExternalName, creation will fail. This + // field will be wiped when updating a Service to type ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional optional string clusterIP = 3; + // ClusterIPs is a list of IP addresses assigned to this service, and are + // usually assigned randomly. If an address is specified manually, is + // in-range (as per system configuration), and is not in use, it will be + // allocated to the service; otherwise creation of the service will fail. 
+ // This field may not be changed through updates unless the type field is + // also being changed to ExternalName (which requires this field to be + // empty) or the type field is being changed from ExternalName (in which + // case this field may optionally be specified, as describe above). Valid + // values are "None", empty string (""), or a valid IP address. Setting + // this to "None" makes a "headless service" (no virtual IP), which is + // useful when direct endpoint connections are preferred and proxying is + // not required. Only applies to types ClusterIP, NodePort, and + // LoadBalancer. If this field is specified when creating a Service of type + // ExternalName, creation will fail. This field will be wiped when updating + // a Service to type ExternalName. If this field is not specified, it will + // be initialized from the clusterIP field. If this field is specified, + // clients must ensure that clusterIPs[0] and clusterIP have the same + // value. + // + // Unless the "IPv6DualStack" feature gate is enabled, this field is + // limited to one value, which must be the same as the clusterIP field. If + // the feature gate is enabled, this field may hold a maximum of two + // entries (dual-stack IPs, in either order). These IPs must correspond to + // the values of the ipFamilies field. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +listType=atomic + // +optional + repeated string clusterIPs = 18; + // type determines how the Service is exposed. Defaults to ClusterIP. Valid // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. + // routes to the same endpoints as the clusterIP. + // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional optional string type = 4; @@ -4850,10 +4924,10 @@ message ServiceSpec { // +optional repeated string loadBalancerSourceRanges = 9; - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. 
No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. + // externalName is the external reference that discovery mechanisms will + // return as an alias for this service (e.g. a DNS CNAME record). No + // proxying will be involved. Must be a lowercase RFC-1123 hostname + // (https://tools.ietf.org/html/rfc1123) and requires Type to be // +optional optional string externalName = 10; @@ -4867,10 +4941,14 @@ message ServiceSpec { optional string externalTrafficPolicy = 11; // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. + // This only applies when type is set to LoadBalancer and + // externalTrafficPolicy is set to Local. If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). // +optional optional int32 healthCheckNodePort = 12; @@ -4889,24 +4967,6 @@ message ServiceSpec { // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. - // IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, - // you can specify ipFamily when creating a ClusterIP Service to determine whether the - // controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when - // creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In - // either case, if you do not specify an ipFamily explicitly, it will default to the - // cluster's primary IP family. - // This field is part of an alpha feature, and you should not make any assumptions about its - // semantics other than those described above. In particular, you should not assume that it - // can (or cannot) be changed after creation time; that it can only have the values "IPv4" - // and "IPv6"; or that its current value on a given Service correctly reflects the current - // state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service - // is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in - // the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an - // irrelevant value anyway.) - // +optional - optional string ipFamily = 15; - // topologyKeys is a preference-order list of topology keys which // implementations of services should use to preferentially sort endpoints // when accessing this Service, it can not be used at the same time as @@ -4919,8 +4979,51 @@ message ServiceSpec { // The special value "*" may be used to mean "any topology". This catch-all // value, if used, only makes sense as the last value in the list. // If this is not specified or empty, no topology constraints will be applied. 
+ // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. // +optional repeated string topologyKeys = 16; + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. + // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + repeated string ipFamilies = 19; + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + optional string ipFamilyPolicy = 17; + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It may be + // set to "false" if the cluster load-balancer does not rely on NodePorts. + // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer + // and will be cleared if the type is changed to any other type. + // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +optional + optional bool allocateLoadBalancerNodePorts = 20; } // ServiceStatus represents the current status of a service. @@ -4929,6 +5032,14 @@ message ServiceStatus { // if one is present. // +optional optional LoadBalancerStatus loadBalancer = 1; + + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go index 5bc9cd5bf..4e249d03a 100644 --- a/vendor/k8s.io/api/core/v1/resource.go +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -21,44 +21,39 @@ import ( ) // Returns string version of ResourceName. 
-func (self ResourceName) String() string { - return string(self) +func (rn ResourceName) String() string { + return string(rn) } -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} +// Cpu returns the Cpu limit if specified. +func (rl *ResourceList) Cpu() *resource.Quantity { + return rl.Name(ResourceCPU, resource.DecimalSI) } -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} +// Memory returns the Memory limit if specified. +func (rl *ResourceList) Memory() *resource.Quantity { + return rl.Name(ResourceMemory, resource.BinarySI) } -// Returns the Storage limit if specified. -func (self *ResourceList) Storage() *resource.Quantity { - if val, ok := (*self)[ResourceStorage]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} +// Storage returns the Storage limit if specified. +func (rl *ResourceList) Storage() *resource.Quantity { + return rl.Name(ResourceStorage, resource.BinarySI) } -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} +// Pods returns the list of pods +func (rl *ResourceList) Pods() *resource.Quantity { + return rl.Name(ResourcePods, resource.DecimalSI) +} + +// StorageEphemeral returns the list of ephemeral storage volumes, if any +func (rl *ResourceList) StorageEphemeral() *resource.Quantity { + return rl.Name(ResourceEphemeralStorage, resource.BinarySI) } -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { +// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format. +func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity { + if val, ok := (*rl)[name]; ok { return &val } - return &resource.Quantity{} + return &resource.Quantity{Format: defaultFormat} } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index f3ec52e71..769b70a95 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -489,17 +489,13 @@ type PersistentVolumeClaimSpec struct { // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // * An existing custom resource that implements data population (Alpha) + // In order to use custom resource types that implement data population, + // the AnyVolumeDataSource feature gate must be enabled. // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. 
- // If the specified data source is not supported, the volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. // +optional DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` } @@ -1615,6 +1611,7 @@ type ServiceAccountTokenProjection struct { // Represents a projected volume source type ProjectedVolumeSource struct { // list of volume projections + // +optional Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` // Mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. @@ -1840,6 +1837,7 @@ type ContainerPort struct { // Protocol for port. Must be UDP, TCP, or SCTP. // Defaults to "TCP". // +optional + // +default="TCP" Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` // What host IP to bind the external port to. // +optional @@ -2287,7 +2285,6 @@ type Container struct { // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // when it might take a long time to load data or warm a cache, than during steady-state operation. // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` @@ -3298,7 +3295,7 @@ type PodSecurityContext struct { // volume types which support fsGroup based ownership(and permissions). // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. // +optional FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` // The seccomp options to use by the containers in this pod. @@ -3943,12 +3940,26 @@ const ( ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" ) +// These are the valid conditions of a service. +const ( + // LoadBalancerPortsError represents the condition of the requested ports + // on the cloud load balancer instance. + LoadBalancerPortsError = "LoadBalancerPortsError" +) + // ServiceStatus represents the current status of a service. type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, // if one is present. // +optional LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // LoadBalancerStatus represents the status of a load-balancer. 
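With the types.go hunks above, ServiceStatus gains a Conditions list (metav1.Condition) and the built-in LoadBalancerPortsError condition type; the LoadBalancerIngress hunk that follows adds the matching per-port status records. A small sketch of how a consumer might read the new condition; findCondition and the Reason/Message strings are illustrative, not API provided:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// findCondition scans the new ServiceStatus.Conditions list for a condition
// of the given type and returns it, or nil if it is not present.
func findCondition(conds []metav1.Condition, condType string) *metav1.Condition {
	for i := range conds {
		if conds[i].Type == condType {
			return &conds[i]
		}
	}
	return nil
}

func main() {
	svc := v1.Service{Status: v1.ServiceStatus{
		Conditions: []metav1.Condition{{
			Type:    v1.LoadBalancerPortsError,
			Status:  metav1.ConditionTrue,
			Reason:  "MixedProtocolNotSupported", // illustrative reason
			Message: "the cloud provider does not support mixed-protocol ports", // illustrative message
		}},
	}}
	if c := findCondition(svc.Status.Conditions, v1.LoadBalancerPortsError); c != nil {
		fmt.Printf("%s=%s: %s\n", c.Type, c.Status, c.Message)
	}
}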
@@ -3971,10 +3982,21 @@ type LoadBalancerIngress struct { // (typically AWS load-balancers) // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` } +const ( + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 +) + // IPFamily represents the IP Family (IPv4 or IPv6). This type is used -// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). type IPFamily string const ( @@ -3982,8 +4004,29 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" - // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service - MaxServiceTopologyKeys = 16 +) + +// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +type IPFamilyPolicyType string + +const ( + // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. + // The IPFamily assigned is based on the default IPFamily used by the cluster + // or as identified by service.spec.ipFamilies field + IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack" + // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when + // the cluster is configured for dual-stack. If the cluster is not configured + // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not + // set in service.spec.ipFamilies then the service will be assigned the default IPFamily + // configured on the cluster + IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack" + // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using + // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The + // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If + // service.spec.ipFamilies was not provided then it will be assigned according to how they are + // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative + // IPFamily will be added by apiserver + IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack" ) // ServiceSpec describes the attributes that a user creates on a service. @@ -4007,30 +4050,68 @@ type ServiceSpec struct { Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. + // randomly. 
If an address is specified manually, is in-range (as per + // system configuration), and is not in use, it will be allocated to the + // service; otherwise creation of the service will fail. This field may not + // be changed through updates unless the type field is also being changed + // to ExternalName (which requires this field to be blank) or the type + // field is being changed from ExternalName (in which case this field may + // optionally be specified, as describe above). Valid values are "None", + // empty string (""), or a valid IP address. Setting this to "None" makes a + // "headless service" (no virtual IP), which is useful when direct endpoint + // connections are preferred and proxying is not required. Only applies to + // types ClusterIP, NodePort, and LoadBalancer. If this field is specified + // when creating a Service of type ExternalName, creation will fail. This + // field will be wiped when updating a Service to type ExternalName. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + // ClusterIPs is a list of IP addresses assigned to this service, and are + // usually assigned randomly. If an address is specified manually, is + // in-range (as per system configuration), and is not in use, it will be + // allocated to the service; otherwise creation of the service will fail. + // This field may not be changed through updates unless the type field is + // also being changed to ExternalName (which requires this field to be + // empty) or the type field is being changed from ExternalName (in which + // case this field may optionally be specified, as describe above). Valid + // values are "None", empty string (""), or a valid IP address. Setting + // this to "None" makes a "headless service" (no virtual IP), which is + // useful when direct endpoint connections are preferred and proxying is + // not required. Only applies to types ClusterIP, NodePort, and + // LoadBalancer. If this field is specified when creating a Service of type + // ExternalName, creation will fail. This field will be wiped when updating + // a Service to type ExternalName. If this field is not specified, it will + // be initialized from the clusterIP field. If this field is specified, + // clients must ensure that clusterIPs[0] and clusterIP have the same + // value. + // + // Unless the "IPv6DualStack" feature gate is enabled, this field is + // limited to one value, which must be the same as the clusterIP field. If + // the feature gate is enabled, this field may hold a maximum of two + // entries (dual-stack IPs, in either order). These IPs must correspond to + // the values of the ipFamilies field. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +listType=atomic + // +optional + ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"` + // type determines how the Service is exposed. Defaults to ClusterIP. Valid // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. 
If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing + // to endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object or + // EndpointSlice objects. If clusterIP is "None", no virtual IP is + // allocated and the endpoints are published as a set of endpoints rather + // than a virtual IP. // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. + // routes to the same endpoints as the clusterIP. + // "LoadBalancer" builds on NodePort and creates an external load-balancer + // (if supported in the current cloud) which routes to the same endpoints + // as the clusterIP. + // "ExternalName" aliases this service to the specified externalName. + // Several other fields do not apply to ExternalName services. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -4066,10 +4147,10 @@ type ServiceSpec struct { // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. + // externalName is the external reference that discovery mechanisms will + // return as an alias for this service (e.g. a DNS CNAME record). No + // proxying will be involved. Must be a lowercase RFC-1123 hostname + // (https://tools.ietf.org/html/rfc1123) and requires Type to be // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` @@ -4083,10 +4164,14 @@ type ServiceSpec struct { ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. + // This only applies when type is set to LoadBalancer and + // externalTrafficPolicy is set to Local. If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). 
// +optional HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` @@ -4105,24 +4190,6 @@ type ServiceSpec struct { // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. - // IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, - // you can specify ipFamily when creating a ClusterIP Service to determine whether the - // controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when - // creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In - // either case, if you do not specify an ipFamily explicitly, it will default to the - // cluster's primary IP family. - // This field is part of an alpha feature, and you should not make any assumptions about its - // semantics other than those described above. In particular, you should not assume that it - // can (or cannot) be changed after creation time; that it can only have the values "IPv4" - // and "IPv6"; or that its current value on a given Service correctly reflects the current - // state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service - // is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in - // the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an - // irrelevant value anyway.) - // +optional - IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` - // topologyKeys is a preference-order list of topology keys which // implementations of services should use to preferentially sort endpoints // when accessing this Service, it can not be used at the same time as @@ -4135,8 +4202,54 @@ type ServiceSpec struct { // The special value "*" may be used to mean "any topology". This catch-all // value, if used, only makes sense as the last value in the list. // If this is not specified or empty, no topology constraints will be applied. + // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. // +optional TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + + // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. + // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. 
+ // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"` + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It may be + // set to "false" if the cluster load-balancer does not rely on NodePorts. + // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer + // and will be cleared if the type is changed to any other type. + // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +optional + AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"` } // ServicePort contains information on service's port. @@ -4151,6 +4264,7 @@ type ServicePort struct { // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". // Default is TCP. + // +default="TCP" // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` @@ -4179,10 +4293,14 @@ type ServicePort struct { // +optional TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). 
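Taken together, the ServiceSpec additions above (clusterIPs, ipFamilies, ipFamilyPolicy, allocateLoadBalancerNodePorts) describe the dual-stack surface gated by the IPv6DualStack feature gate; the ServicePort changes continue below. A sketch of a spec that requests dual-stack explicitly; the object name and selector are placeholders, and clusterIPs is left for the apiserver to allocate:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	policy := v1.IPFamilyPolicyPreferDualStack
	allocateNodePorts := false

	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}, // placeholder names
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeLoadBalancer,
			Selector: map[string]string{"app": "demo"},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: v1.ProtocolTCP}},
			// Prefer two families when the cluster is dual-stack, fall back to one otherwise.
			IPFamilyPolicy: &policy,
			// These families must line up with clusterIPs; both are governed by ipFamilyPolicy.
			IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
			// The cluster load-balancer is assumed here not to rely on node ports.
			AllocateLoadBalancerNodePorts: &allocateNodePorts,
		},
	}
	fmt.Printf("%s: policy=%s families=%v\n", svc.Name, *svc.Spec.IPFamilyPolicy, svc.Spec.IPFamilies)
}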
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` @@ -5275,7 +5393,12 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Event is a report of an event somewhere in the cluster. +// Event is a report of an event somewhere in the cluster. Events +// have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -6104,3 +6227,26 @@ const ( // and data streams for a single forwarded connection PortForwardRequestIDHeader = "requestID" ) + +// PortStatus represents the error condition of a service port + +type PortStatus struct { + // Port is the port number of the service port of which status is recorded here + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 61832b815..c58d8ac56 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -339,7 +339,7 @@ var map_Container = map[string]string{ "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. 
This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -637,7 +637,7 @@ func (EphemeralVolumeSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", + "": "Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", @@ -953,6 +953,7 @@ var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } func (LoadBalancerIngress) SwaggerDoc() map[string]string { @@ -1310,7 +1311,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", + "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1602,7 +1603,7 @@ var map_PodSecurityContext = map[string]string{ "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.", "seccompProfile": "The seccomp options to use by the containers in this pod.", } @@ -1723,6 +1724,16 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string { return map_PodTemplateSpec } +var map_PortStatus = map[string]string{ + "port": "Port is the port number of the service port of which status is recorded here", + "protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (PortStatus) SwaggerDoc() map[string]string { + return map_PortStatus +} + var map_PortworxVolumeSource = map[string]string{ "": "PortworxVolumeSource represents a Portworx volume resource.", "volumeID": "VolumeID uniquely identifies a Portworx volume", @@ -2209,7 +2220,7 @@ var map_ServicePort = map[string]string{ "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -2226,22 +2237,25 @@ func (ServiceProxyOptions) SwaggerDoc() map[string]string { } var map_ServiceSpec = map[string]string{ - "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", - "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. 
Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", - "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", - "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", - "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", - "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, you can specify ipFamily when creating a ClusterIP Service to determine whether the controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In either case, if you do not specify an ipFamily explicitly, it will default to the cluster's primary IP family. This field is part of an alpha feature, and you should not make any assumptions about its semantics other than those described above. 
In particular, you should not assume that it can (or cannot) be changed after creation time; that it can only have the values \"IPv4\" and \"IPv6\"; or that its current value on a given Service correctly reflects the current state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an irrelevant value anyway.)", - "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. 
This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. 
This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be", + "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", + "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", + "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. 
This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.", + "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. 
This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2251,6 +2265,7 @@ func (ServiceSpec) SwaggerDoc() map[string]string { var map_ServiceStatus = map[string]string{ "": "ServiceStatus represents the current status of a service.", "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", + "conditions": "Current service state", } func (ServiceStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 22aa55b91..a506f17f6 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -19,10 +19,16 @@ package v1 const ( LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" - LabelZoneRegionStable = "topology.kubernetes.io/region" + LabelFailureDomainBetaZone = "failure-domain.beta.kubernetes.io/zone" + LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region" + LabelTopologyZone = "topology.kubernetes.io/zone" + LabelTopologyRegion = "topology.kubernetes.io/region" + + // Legacy names for compat. + LabelZoneFailureDomain = LabelFailureDomainBetaZone // deprecated, remove after 1.20 + LabelZoneRegion = LabelFailureDomainBetaRegion // deprecated, remove after 1.20 + LabelZoneFailureDomainStable = LabelTopologyZone + LabelZoneRegionStable = LabelTopologyRegion LabelInstanceType = "beta.kubernetes.io/instance-type" LabelInstanceTypeStable = "node.kubernetes.io/instance-type" diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 445c7c04a..b868f9ba5 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -2144,6 +2144,13 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2163,7 +2170,9 @@ func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { if in.Ingress != nil { in, out := &in.Ingress, &out.Ingress *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -4079,6 +4088,27 @@ func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortStatus) DeepCopyInto(out *PortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus. +func (in *PortStatus) DeepCopy() *PortStatus { + if in == nil { + return nil + } + out := new(PortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { *out = *in @@ -5270,6 +5300,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { (*out)[key] = val } } + if in.ClusterIPs != nil { + in, out := &in.ClusterIPs, &out.ClusterIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.ExternalIPs != nil { in, out := &in.ExternalIPs, &out.ExternalIPs *out = make([]string, len(*in)) @@ -5285,16 +5320,26 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } - if in.IPFamily != nil { - in, out := &in.IPFamily, &out.IPFamily - *out = new(IPFamily) - **out = **in - } if in.TopologyKeys != nil { in, out := &in.TopologyKeys, &out.TopologyKeys *out = make([]string, len(*in)) copy(*out, *in) } + if in.IPFamilies != nil { + in, out := &in.IPFamilies, &out.IPFamilies + *out = make([]IPFamily, len(*in)) + copy(*out, *in) + } + if in.IPFamilyPolicy != nil { + in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy + *out = new(IPFamilyPolicyType) + **out = **in + } + if in.AllocateLoadBalancerNodePorts != nil { + in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts + *out = new(bool) + **out = **in + } return } @@ -5312,6 +5357,13 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec { func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { *out = *in in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go index 45c4382cf..c7db73cb7 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -200,54 +200,58 @@ func init() { } var fileDescriptor_772f83c5b34e07a5 = []byte{ - // 746 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, - 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca, - 0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba, - 0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0, - 0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a, - 0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10, - 0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64, - 0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24, - 0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97, - 0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88, - 0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca, - 0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c, - 0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58, - 0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23, - 0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb, - 0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 
0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13, - 0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2, - 0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68, - 0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c, - 0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e, - 0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3, - 0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88, - 0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, - 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51, - 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a, - 0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7, - 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55, - 0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00, - 0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3, - 0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a, - 0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37, - 0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2, - 0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31, - 0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b, - 0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3, - 0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35, - 0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3, - 0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79, - 0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31, - 0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f, - 0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5, - 0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b, - 0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96, - 0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, - 0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, - 0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00, + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8f, 0xe3, 0x44, + 0x10, 0x8d, 0x27, 0x13, 0xd6, 0xee, 0xec, 0x88, 0xdd, 0x16, 0x87, 0x68, 0x00, 0x7b, 0x14, 0x84, + 0x88, 0x34, 0xd0, 0x26, 0x23, 0x40, 0x2b, 0x38, 0x8d, 0x61, 0xf9, 0x90, 0x60, 0x19, 0x7a, 0xe7, + 0x80, 0x10, 0x07, 0x7a, 0xec, 0x5a, 0xc7, 0x24, 0x76, 0x5b, 0xdd, 0x9d, 0x48, 0xb9, 0xf1, 0x0f, + 0xe0, 0x47, 0x21, 0x34, 0xc7, 0x3d, 0xee, 0xc9, 0x62, 0xbc, 0x12, 
0x3f, 0x62, 0x4f, 0xa8, 0xdb, + 0x9f, 0x43, 0x80, 0xcd, 0xcd, 0xfd, 0xaa, 0xde, 0xab, 0x7a, 0xe5, 0x2a, 0xf4, 0xf9, 0xf2, 0x81, + 0x24, 0x09, 0xf7, 0x97, 0xeb, 0x2b, 0x10, 0x19, 0x28, 0x90, 0xfe, 0x06, 0xb2, 0x88, 0x0b, 0xbf, + 0x0e, 0xb0, 0x3c, 0xf1, 0xa3, 0x44, 0x86, 0x7c, 0x03, 0x62, 0xeb, 0x6f, 0xe6, 0x6c, 0x95, 0x2f, + 0xd8, 0xdc, 0x8f, 0x21, 0x03, 0xc1, 0x14, 0x44, 0x24, 0x17, 0x5c, 0x71, 0xfc, 0x66, 0x95, 0x4e, + 0x58, 0x9e, 0x90, 0x36, 0x9d, 0x34, 0xe9, 0xc7, 0xef, 0xc5, 0x89, 0x5a, 0xac, 0xaf, 0x48, 0xc8, + 0x53, 0x3f, 0xe6, 0x31, 0xf7, 0x0d, 0xeb, 0x6a, 0xfd, 0xc4, 0xbc, 0xcc, 0xc3, 0x7c, 0x55, 0x6a, + 0xc7, 0xd3, 0x5e, 0xf1, 0x90, 0x0b, 0xf0, 0x37, 0x3b, 0x15, 0x8f, 0x3f, 0xe8, 0x72, 0x52, 0x16, + 0x2e, 0x92, 0x4c, 0xf7, 0x97, 0x2f, 0x63, 0x0d, 0x48, 0x3f, 0x05, 0xc5, 0xfe, 0x8d, 0xe5, 0xff, + 0x17, 0x4b, 0xac, 0x33, 0x95, 0xa4, 0xb0, 0x43, 0xf8, 0xe8, 0x65, 0x04, 0x19, 0x2e, 0x20, 0x65, + 0xff, 0xe4, 0x4d, 0xff, 0x1a, 0x22, 0xfb, 0x61, 0x16, 0xe5, 0x3c, 0xc9, 0x14, 0x3e, 0x45, 0x0e, + 0x8b, 0x22, 0x01, 0x52, 0x82, 0x9c, 0x58, 0x27, 0xc3, 0x99, 0x13, 0x1c, 0x95, 0x85, 0xe7, 0x9c, + 0x37, 0x20, 0xed, 0xe2, 0x18, 0x10, 0x0a, 0x79, 0x16, 0x25, 0x2a, 0xe1, 0x99, 0x9c, 0x1c, 0x9c, + 0x58, 0xb3, 0xf1, 0xd9, 0x9c, 0xfc, 0xef, 0x7c, 0x49, 0x53, 0xe9, 0xd3, 0x96, 0x18, 0xe0, 0xeb, + 0xc2, 0x1b, 0x94, 0x85, 0x87, 0x3a, 0x8c, 0xf6, 0x84, 0xf1, 0x0c, 0xd9, 0x0b, 0x2e, 0x55, 0xc6, + 0x52, 0x98, 0x0c, 0x4f, 0xac, 0x99, 0x13, 0xdc, 0x2d, 0x0b, 0xcf, 0xfe, 0xb2, 0xc6, 0x68, 0x1b, + 0xc5, 0x17, 0xc8, 0x51, 0x4c, 0xc4, 0xa0, 0x28, 0x3c, 0x99, 0x1c, 0x9a, 0x7e, 0xde, 0xea, 0xf7, + 0xa3, 0xff, 0x10, 0xd9, 0xcc, 0xc9, 0xb7, 0x57, 0x3f, 0x43, 0xa8, 0x93, 0x40, 0x40, 0x16, 0x42, + 0x65, 0xf1, 0xb2, 0x61, 0xd2, 0x4e, 0x04, 0x87, 0xc8, 0x56, 0x3c, 0xe7, 0x2b, 0x1e, 0x6f, 0x27, + 0xa3, 0x93, 0xe1, 0x6c, 0x7c, 0xf6, 0xe1, 0x9e, 0x06, 0xc9, 0x65, 0xcd, 0x7b, 0x98, 0x29, 0xb1, + 0x0d, 0xee, 0xd5, 0x26, 0xed, 0x06, 0xa6, 0xad, 0xb0, 0x36, 0x98, 0xf1, 0x08, 0x1e, 0x69, 0x83, + 0xaf, 0x74, 0x06, 0x1f, 0xd5, 0x18, 0x6d, 0xa3, 0xc7, 0x9f, 0xa0, 0xa3, 0x5b, 0xb2, 0xf8, 0x1e, + 0x1a, 0x2e, 0x61, 0x3b, 0xb1, 0x34, 0x8b, 0xea, 0x4f, 0xfc, 0x1a, 0x1a, 0x6d, 0xd8, 0x6a, 0x0d, + 0xe6, 0x7f, 0x38, 0xb4, 0x7a, 0x7c, 0x7c, 0xf0, 0xc0, 0x9a, 0xfe, 0x6a, 0x21, 0xbc, 0x3b, 0x7e, + 0xec, 0xa1, 0x91, 0x00, 0x16, 0x55, 0x22, 0x76, 0xe0, 0x94, 0x85, 0x37, 0xa2, 0x1a, 0xa0, 0x15, + 0x8e, 0xdf, 0x46, 0x77, 0x24, 0x88, 0x4d, 0x92, 0xc5, 0x46, 0xd3, 0x0e, 0xc6, 0x65, 0xe1, 0xdd, + 0x79, 0x5c, 0x41, 0xb4, 0x89, 0xe1, 0x39, 0x1a, 0x2b, 0x10, 0x69, 0x92, 0x31, 0xa5, 0x53, 0x87, + 0x26, 0xf5, 0xd5, 0xb2, 0xf0, 0xc6, 0x97, 0x1d, 0x4c, 0xfb, 0x39, 0xd3, 0x3f, 0x2c, 0x74, 0xb7, + 0xe9, 0xe8, 0x82, 0x0b, 0x85, 0xdf, 0x40, 0x87, 0xe6, 0x37, 0x1b, 0x3f, 0x81, 0x5d, 0x16, 0xde, + 0xa1, 0x99, 0x80, 0x41, 0xf1, 0x17, 0xc8, 0x36, 0x2b, 0x1b, 0xf2, 0x55, 0xe5, 0x2e, 0x38, 0xd5, + 0x73, 0xba, 0xa8, 0xb1, 0x17, 0x85, 0xf7, 0xfa, 0xee, 0x39, 0x92, 0x26, 0x4c, 0x5b, 0xb2, 0x2e, + 0x93, 0x73, 0xa1, 0x4c, 0x8f, 0xa3, 0xaa, 0x8c, 0x2e, 0x4f, 0x0d, 0xaa, 0x8d, 0xb0, 0x3c, 0x6f, + 0x68, 0x66, 0x8f, 0x9c, 0xca, 0xc8, 0x79, 0x07, 0xd3, 0x7e, 0xce, 0xf4, 0xf9, 0x01, 0x3a, 0x6a, + 0x8c, 0x3c, 0x5e, 0x25, 0x21, 0xe0, 0x9f, 0x90, 0xad, 0x2f, 0x3b, 0x62, 0x8a, 0x19, 0x37, 0xe3, + 0xb3, 0xf7, 0x7b, 0x8b, 0xd3, 0x1e, 0x28, 0xc9, 0x97, 0xb1, 0x06, 0x24, 0xd1, 0xd9, 0xdd, 0x6e, + 0x7e, 0x03, 0x8a, 0x75, 0x87, 0xd1, 0x61, 0xb4, 0x55, 0xc5, 0x9f, 0xa1, 0x71, 0x7d, 0x8a, 0x97, + 0xdb, 0x1c, 0xea, 0x36, 0xa7, 0x35, 0x65, 0x7c, 0xde, 0x85, 0x5e, 0xdc, 0x7e, 0xd2, 0x3e, 
0x0d, + 0x7f, 0x8f, 0x1c, 0xa8, 0x1b, 0xd7, 0x27, 0xac, 0x37, 0xfc, 0x9d, 0x3d, 0x37, 0x3c, 0xb8, 0x5f, + 0x17, 0x73, 0x1a, 0x44, 0xd2, 0x4e, 0x0c, 0x5f, 0xa0, 0x91, 0x1e, 0xa7, 0x9c, 0x0c, 0x8d, 0xea, + 0xe9, 0x9e, 0xaa, 0xfa, 0x47, 0x04, 0x47, 0xb5, 0xf2, 0x48, 0xbf, 0x24, 0xad, 0x84, 0xa6, 0xbf, + 0x5b, 0xe8, 0xfe, 0xad, 0x29, 0x7f, 0x9d, 0x48, 0x85, 0x7f, 0xdc, 0x99, 0x34, 0xd9, 0x6f, 0xd2, + 0x9a, 0x6d, 0xe6, 0xdc, 0xde, 0x66, 0x83, 0xf4, 0xa6, 0xfc, 0x1d, 0x1a, 0x25, 0x0a, 0xd2, 0x66, + 0x36, 0xef, 0xee, 0xe9, 0xc2, 0xb4, 0xd7, 0xd9, 0xf8, 0x4a, 0x4b, 0xd0, 0x4a, 0x29, 0x20, 0xd7, + 0x37, 0xee, 0xe0, 0xe9, 0x8d, 0x3b, 0x78, 0x76, 0xe3, 0x0e, 0x7e, 0x29, 0x5d, 0xeb, 0xba, 0x74, + 0xad, 0xa7, 0xa5, 0x6b, 0x3d, 0x2b, 0x5d, 0xeb, 0xcf, 0xd2, 0xb5, 0x7e, 0x7b, 0xee, 0x0e, 0x7e, + 0xb0, 0x1b, 0xcd, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x03, 0x95, 0x92, 0xa5, 0xfa, 0x06, 0x00, + 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -270,6 +274,13 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } if len(m.Topology) > 0 { keysForTopology := make([]string, 0, len(m.Topology)) for k := range m.Topology { @@ -355,6 +366,26 @@ func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.Ready != nil { i-- if *m.Ready { @@ -571,6 +602,10 @@ func (m *Endpoint) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -583,6 +618,12 @@ func (m *EndpointConditions) Size() (n int) { if m.Ready != nil { n += 2 } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } return n } @@ -678,6 +719,7 @@ func (this *Endpoint) String() string { `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") return s @@ -688,6 +730,8 @@ func (this *EndpointConditions) String() string { } s := strings.Join([]string{`&EndpointConditions{`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1031,7 +1075,7 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1042,16 +1086,46 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } m.Topology[mapkey] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1116,16 +1190,55 @@ func (m *EndpointConditions) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1294,10 +1407,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1480,10 +1590,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1600,10 +1707,7 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto index 2cbbdcdb0..4b66a6c57 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.discovery.v1alpha1; @@ -45,8 +45,8 @@ message Endpoint { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. 
+ // fungible (e.g. multiple A values in DNS). Must be lowercase and pass + // DNS label (RFC 1123) validation. // +optional optional string hostname = 3; @@ -67,8 +67,15 @@ message Endpoint { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional map topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + optional string nodeName = 6; } // EndpointConditions represents the current condition of an endpoint. @@ -76,9 +83,25 @@ message EndpointConditions { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool terminating = 3; } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go index cf50b501c..34b706ea8 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/types.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -86,8 +86,8 @@ type Endpoint struct { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass + // DNS label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // targetRef is a reference to a Kubernetes object that represents this @@ -106,8 +106,14 @@ type Endpoint struct { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. 
This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` } // EndpointConditions represents the current condition of an endpoint. @@ -115,9 +121,25 @@ type EndpointConditions struct { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go index 1ba2d60d4..f6c983689 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -31,9 +31,10 @@ var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", - "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", - "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. 
This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", } func (Endpoint) SwaggerDoc() map[string]string { @@ -41,8 +42,10 @@ func (Endpoint) SwaggerDoc() map[string]string { } var map_EndpointConditions = map[string]string{ - "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. 
This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", } func (EndpointConditions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go index c72f64acf..13e54d500 100644 --- a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -51,6 +51,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = val } } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } return } @@ -72,6 +77,16 @@ func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { *out = new(bool) **out = **in } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go index ce0046c51..1fdb8ddb7 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -200,54 +200,57 @@ func init() { } var fileDescriptor_ece80bbc872d519b = []byte{ - // 745 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6b, 0xdb, 0x48, - 0x14, 0xb6, 0xe2, 0x88, 0x95, 0xc6, 0x31, 0x9b, 0x0c, 0x7b, 0x30, 0xde, 0x20, 0x19, 0x2f, 0x2c, - 0x66, 0x43, 0xa4, 0x75, 0xc8, 0x2e, 0x61, 0xf7, 0x14, 0xed, 0x86, 0xb6, 0xd0, 0x36, 0x66, 0x1a, - 0x28, 0x94, 0x1e, 0x3a, 0x96, 0x26, 0xb2, 0x6a, 0x5b, 0x23, 0x34, 0x63, 0x83, 0x6f, 0xfd, 0x13, - 0xfa, 0xf7, 0xf4, 0x5a, 0x28, 0x39, 0xe6, 0x98, 0x93, 0xa8, 0xd5, 0xff, 0x22, 0xa7, 0x32, 0xa3, - 0x5f, 0x76, 0xdd, 0x1f, 0xbe, 0xcd, 0x7c, 0xf3, 0xbe, 0xef, 0xbd, 0xf7, 0xcd, 0x7b, 0xe0, 0x62, - 0x7c, 0xc6, 0xac, 0x80, 0xda, 0xe3, 0xd9, 0x90, 0xc4, 0x21, 0xe1, 0x84, 0xd9, 0x73, 0x12, 0x7a, - 0x34, 0xb6, 0xf3, 0x07, 0x1c, 0x05, 0xb6, 0x17, 0x30, 0x97, 0xce, 0x49, 0xbc, 0xb0, 0xe7, 0xfd, - 0x21, 0xe1, 0xb8, 0x6f, 0xfb, 0x24, 0x24, 0x31, 0xe6, 0xc4, 0xb3, 0xa2, 0x98, 0x72, 0x0a, 0x0f, - 0xb3, 0x68, 0x0b, 0x47, 0x81, 0x55, 0x46, 0x5b, 0x79, 0x74, 0xfb, 0xd8, 0x0f, 0xf8, 0x68, 0x36, - 0xb4, 0x5c, 0x3a, 0xb5, 0x7d, 0xea, 0x53, 0x5b, 0x92, 0x86, 0xb3, 0x6b, 0x79, 0x93, 0x17, 0x79, - 0xca, 0xc4, 0xda, 0xdd, 0x95, 0xd4, 0x2e, 0x8d, 0x89, 0x3d, 0xdf, 0x48, 0xd8, 0x3e, 0xad, 0x62, - 0xa6, 0xd8, 0x1d, 0x05, 0xa1, 0xa8, 0x2e, 0x1a, 0xfb, 0x02, 0x60, 0xf6, 0x94, 0x70, 0xfc, 0x35, - 0x96, 0xfd, 0x2d, 0x56, 0x3c, 0x0b, 0x79, 0x30, 0x25, 0x1b, 0x84, 0xbf, 0x7f, 0x44, 0x60, 0xee, - 0x88, 0x4c, 0xf1, 0x97, 0xbc, 0xee, 0xbb, 0x3a, 0xd0, 0x2e, 0x42, 0x2f, 0xa2, 0x41, 0xc8, 0xe1, - 0x11, 0xd0, 0xb1, 0xe7, 0xc5, 0x84, 0x31, 0xc2, 0x5a, 0x4a, 0xa7, 0xde, 0xd3, 0x9d, 0x66, 0x9a, - 0x98, 0xfa, 0x79, 0x01, 0xa2, 0xea, 0x1d, 0x7a, 0x00, 0xb8, 0x34, 0xf4, 0x02, 0x1e, 0xd0, 0x90, - 0xb5, 0x76, 0x3a, 0x4a, 0xaf, 0x71, 0xf2, 0xa7, 0xf5, 0x3d, 0x7b, 0xad, 0x22, 0xd1, 0x7f, 0x25, - 0xcf, 0x81, 0x37, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5a, 0xd1, 0x85, 0x3d, 0xa0, 0x8d, - 0x28, 0xe3, 0x21, 0x9e, 0x92, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x67, 0x2f, 0x4d, 0x4c, 0xed, 0x61, - 0x8e, 0xa1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x71, 0xec, 0x13, 0x8e, 0xc8, 0x75, 0x6b, 0x57, 0x96, - 0xf3, 0xdb, 0x6a, 0x39, 0xe2, 0x83, 0xac, 0x79, 0xdf, 0xba, 0x1c, 0xbe, 0x26, 0xae, 
0x08, 0x22, - 0x31, 0x09, 0x5d, 0x92, 0x75, 0x78, 0x55, 0x30, 0x51, 0x25, 0x02, 0x87, 0x40, 0xe3, 0x34, 0xa2, - 0x13, 0xea, 0x2f, 0x5a, 0x6a, 0xa7, 0xde, 0x6b, 0x9c, 0x9c, 0x6e, 0xd7, 0x9f, 0x75, 0x95, 0xd3, - 0x2e, 0x42, 0x1e, 0x2f, 0x9c, 0xfd, 0xbc, 0x47, 0xad, 0x80, 0x51, 0xa9, 0xdb, 0xfe, 0x17, 0x34, - 0xd7, 0x82, 0xe1, 0x3e, 0xa8, 0x8f, 0xc9, 0xa2, 0xa5, 0x88, 0x5e, 0x91, 0x38, 0xc2, 0x5f, 0x80, - 0x3a, 0xc7, 0x93, 0x19, 0x91, 0x1e, 0xeb, 0x28, 0xbb, 0xfc, 0xb3, 0x73, 0xa6, 0x74, 0xff, 0x02, - 0x70, 0xd3, 0x52, 0x68, 0x02, 0x35, 0x26, 0xd8, 0xcb, 0x34, 0x34, 0x47, 0x4f, 0x13, 0x53, 0x45, - 0x02, 0x40, 0x19, 0xde, 0xfd, 0xa0, 0x80, 0xbd, 0x82, 0x37, 0xa0, 0x31, 0x87, 0x87, 0x60, 0x57, - 0x1a, 0x2c, 0x93, 0x3a, 0x5a, 0x9a, 0x98, 0xbb, 0x4f, 0x85, 0xb9, 0x12, 0x85, 0x0f, 0x80, 0x26, - 0x67, 0xc5, 0xa5, 0x93, 0xac, 0x04, 0xe7, 0x48, 0x34, 0x33, 0xc8, 0xb1, 0xfb, 0xc4, 0xfc, 0x75, - 0x73, 0x0f, 0xac, 0xe2, 0x19, 0x95, 0x64, 0x91, 0x26, 0xa2, 0x31, 0x97, 0xff, 0xa8, 0x66, 0x69, - 0x44, 0x7a, 0x24, 0x51, 0xd8, 0x07, 0x0d, 0x1c, 0x45, 0x05, 0x4d, 0xfe, 0xa0, 0xee, 0xfc, 0x9c, - 0x26, 0x66, 0xe3, 0xbc, 0x82, 0xd1, 0x6a, 0x4c, 0x77, 0xb9, 0x03, 0x9a, 0x45, 0x23, 0xcf, 0x26, - 0x81, 0x4b, 0xe0, 0x2b, 0xa0, 0x89, 0x95, 0xf2, 0x30, 0xc7, 0xb2, 0x9b, 0xf5, 0x91, 0x2c, 0x37, - 0xc3, 0x8a, 0xc6, 0xbe, 0x00, 0x98, 0x25, 0xa2, 0xab, 0xa9, 0x78, 0x42, 0x38, 0xae, 0x46, 0xb2, - 0xc2, 0x50, 0xa9, 0x0a, 0xff, 0x07, 0x8d, 0x7c, 0x07, 0xae, 0x16, 0x11, 0xc9, 0xcb, 0xec, 0xe6, - 0x94, 0xc6, 0x79, 0xf5, 0x74, 0xbf, 0x7e, 0x45, 0xab, 0x34, 0xf8, 0x1c, 0xe8, 0x24, 0x2f, 0x5c, - 0xec, 0x8e, 0x98, 0xad, 0xdf, 0xb7, 0x9b, 0x2d, 0xe7, 0x20, 0xcf, 0xa5, 0x17, 0x08, 0x43, 0x95, - 0x16, 0xbc, 0x04, 0xaa, 0x70, 0x93, 0xb5, 0xea, 0x52, 0xf4, 0x8f, 0xed, 0x44, 0xc5, 0x37, 0x38, - 0xcd, 0x5c, 0x58, 0x15, 0x37, 0x86, 0x32, 0x9d, 0xee, 0x7b, 0x05, 0x1c, 0xac, 0x79, 0xfc, 0x38, - 0x60, 0x1c, 0xbe, 0xdc, 0xf0, 0xd9, 0xda, 0xce, 0x67, 0xc1, 0x96, 0x2e, 0x97, 0x4b, 0x51, 0x20, - 0x2b, 0x1e, 0x0f, 0x80, 0x1a, 0x70, 0x32, 0x2d, 0x9c, 0x39, 0xda, 0xae, 0x09, 0x59, 0x5d, 0xd5, - 0xc5, 0x23, 0xa1, 0x80, 0x32, 0x21, 0xe7, 0xf8, 0x66, 0x69, 0xd4, 0x6e, 0x97, 0x46, 0xed, 0x6e, - 0x69, 0xd4, 0xde, 0xa4, 0x86, 0x72, 0x93, 0x1a, 0xca, 0x6d, 0x6a, 0x28, 0x77, 0xa9, 0xa1, 0x7c, - 0x4c, 0x0d, 0xe5, 0xed, 0x27, 0xa3, 0xf6, 0xe2, 0xa7, 0x5c, 0xf2, 0x73, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x29, 0x1a, 0xa2, 0x6f, 0x6d, 0x06, 0x00, 0x00, + // 798 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8f, 0xe3, 0x44, + 0x10, 0x8d, 0x27, 0x63, 0xc6, 0xee, 0xec, 0x88, 0xdd, 0x16, 0x87, 0x68, 0x58, 0xd9, 0xa3, 0x20, + 0x50, 0xc4, 0x68, 0x6d, 0x66, 0xb5, 0x42, 0x2b, 0x38, 0x8d, 0x61, 0x04, 0x48, 0xb0, 0x1b, 0xf5, + 0x46, 0x42, 0x42, 0x1c, 0xe8, 0xd8, 0xb5, 0x8e, 0x49, 0xec, 0xb6, 0xba, 0x3b, 0x91, 0x72, 0xe3, + 0x1f, 0xc0, 0x7f, 0x42, 0x42, 0x73, 0xdc, 0xe3, 0x9e, 0x2c, 0x62, 0xf8, 0x15, 0x7b, 0x42, 0xdd, + 0xfe, 0x4a, 0x08, 0x1f, 0xb9, 0x75, 0xbf, 0xaa, 0xf7, 0xaa, 0x5e, 0x75, 0x17, 0xba, 0x5d, 0x3c, + 0x15, 0x5e, 0xc2, 0xfc, 0xc5, 0x6a, 0x06, 0x3c, 0x03, 0x09, 0xc2, 0x5f, 0x43, 0x16, 0x31, 0xee, + 0xd7, 0x01, 0x9a, 0x27, 0x7e, 0x94, 0x88, 0x90, 0xad, 0x81, 0x6f, 0xfc, 0xf5, 0xf5, 0x0c, 0x24, + 0xbd, 0xf6, 0x63, 0xc8, 0x80, 0x53, 0x09, 0x91, 0x97, 0x73, 0x26, 0x19, 0x7e, 0x58, 0x65, 0x7b, + 0x34, 0x4f, 0xbc, 0x36, 0xdb, 0xab, 0xb3, 0x2f, 0x1e, 0xc5, 0x89, 0x9c, 0xaf, 0x66, 0x5e, 0xc8, + 0x52, 0x3f, 0x66, 0x31, 0xf3, 0x35, 0x69, 0xb6, 0x7a, 0xa9, 0x6f, 0xfa, 0xa2, 0x4f, 0x95, 0xd8, + 0xc5, 
0x68, 0xa7, 0x74, 0xc8, 0x38, 0xf8, 0xeb, 0x83, 0x82, 0x17, 0x4f, 0xba, 0x9c, 0x94, 0x86, + 0xf3, 0x24, 0x53, 0xdd, 0xe5, 0x8b, 0x58, 0x01, 0xc2, 0x4f, 0x41, 0xd2, 0x7f, 0x62, 0xf9, 0xff, + 0xc6, 0xe2, 0xab, 0x4c, 0x26, 0x29, 0x1c, 0x10, 0x3e, 0xfe, 0x3f, 0x82, 0x08, 0xe7, 0x90, 0xd2, + 0xbf, 0xf3, 0x46, 0x7f, 0xf6, 0x91, 0x75, 0x9b, 0x45, 0x39, 0x4b, 0x32, 0x89, 0xaf, 0x90, 0x4d, + 0xa3, 0x88, 0x83, 0x10, 0x20, 0x86, 0xc6, 0x65, 0x7f, 0x6c, 0x07, 0xe7, 0x65, 0xe1, 0xda, 0x37, + 0x0d, 0x48, 0xba, 0x38, 0x8e, 0x10, 0x0a, 0x59, 0x16, 0x25, 0x32, 0x61, 0x99, 0x18, 0x9e, 0x5c, + 0x1a, 0xe3, 0xc1, 0xe3, 0x8f, 0xbc, 0xff, 0x1a, 0xaf, 0xd7, 0x14, 0xfa, 0xac, 0xe5, 0x05, 0xf8, + 0xae, 0x70, 0x7b, 0x65, 0xe1, 0xa2, 0x0e, 0x23, 0x3b, 0xba, 0x78, 0x8c, 0xac, 0x39, 0x13, 0x32, + 0xa3, 0x29, 0x0c, 0xfb, 0x97, 0xc6, 0xd8, 0x0e, 0xee, 0x95, 0x85, 0x6b, 0x7d, 0x59, 0x63, 0xa4, + 0x8d, 0xe2, 0x09, 0xb2, 0x25, 0xe5, 0x31, 0x48, 0x02, 0x2f, 0x87, 0xa7, 0xba, 0x9d, 0xf7, 0x76, + 0xdb, 0x51, 0x0f, 0xe4, 0xad, 0xaf, 0xbd, 0xe7, 0xb3, 0x1f, 0x21, 0x54, 0x49, 0xc0, 0x21, 0x0b, + 0xa1, 0x72, 0x38, 0x6d, 0x98, 0xa4, 0x13, 0xc1, 0x33, 0x64, 0x49, 0x96, 0xb3, 0x25, 0x8b, 0x37, + 0x43, 0xf3, 0xb2, 0x3f, 0x1e, 0x3c, 0x7e, 0x72, 0x9c, 0x3f, 0x6f, 0x5a, 0xd3, 0x6e, 0x33, 0xc9, + 0x37, 0xc1, 0xfd, 0xda, 0xa3, 0xd5, 0xc0, 0xa4, 0xd5, 0x55, 0xfe, 0x32, 0x16, 0xc1, 0x33, 0xe5, + 0xef, 0xad, 0xce, 0xdf, 0xb3, 0x1a, 0x23, 0x6d, 0xf4, 0xe2, 0x53, 0x74, 0xbe, 0x27, 0x8b, 0xef, + 0xa3, 0xfe, 0x02, 0x36, 0x43, 0x43, 0xb1, 0x88, 0x3a, 0xe2, 0x77, 0x90, 0xb9, 0xa6, 0xcb, 0x15, + 0xe8, 0xd7, 0xb0, 0x49, 0x75, 0xf9, 0xe4, 0xe4, 0xa9, 0x31, 0xfa, 0xd9, 0x40, 0xf8, 0x70, 0xfa, + 0xd8, 0x45, 0x26, 0x07, 0x1a, 0x55, 0x22, 0x56, 0x60, 0x97, 0x85, 0x6b, 0x12, 0x05, 0x90, 0x0a, + 0xc7, 0xef, 0xa3, 0x33, 0x01, 0x7c, 0x9d, 0x64, 0xb1, 0xd6, 0xb4, 0x82, 0x41, 0x59, 0xb8, 0x67, + 0x2f, 0x2a, 0x88, 0x34, 0x31, 0x7c, 0x8d, 0x06, 0x12, 0x78, 0x9a, 0x64, 0x54, 0xaa, 0xd4, 0xbe, + 0x4e, 0x7d, 0xbb, 0x2c, 0xdc, 0xc1, 0xb4, 0x83, 0xc9, 0x6e, 0xce, 0xe8, 0x37, 0x03, 0xdd, 0x6b, + 0x3a, 0x9a, 0x30, 0x2e, 0xf1, 0x43, 0x74, 0xaa, 0x5f, 0x59, 0xfb, 0x09, 0xac, 0xb2, 0x70, 0x4f, + 0xf5, 0x04, 0x34, 0x8a, 0xbf, 0x40, 0x96, 0xfe, 0xb0, 0x21, 0x5b, 0x56, 0xee, 0x82, 0x2b, 0x35, + 0xa7, 0x49, 0x8d, 0xbd, 0x29, 0xdc, 0x77, 0x0f, 0x97, 0xd1, 0x6b, 0xc2, 0xa4, 0x25, 0xab, 0x32, + 0x39, 0xe3, 0x52, 0xf7, 0x68, 0x56, 0x65, 0x54, 0x79, 0xa2, 0x51, 0x65, 0x84, 0xe6, 0x79, 0x43, + 0xd3, 0xdf, 0xc8, 0xae, 0x8c, 0xdc, 0x74, 0x30, 0xd9, 0xcd, 0x19, 0x6d, 0x4f, 0xd0, 0x79, 0x63, + 0xe4, 0xc5, 0x32, 0x09, 0x01, 0xff, 0x80, 0x2c, 0xb5, 0xd7, 0x11, 0x95, 0x54, 0xbb, 0xd9, 0xdf, + 0x8b, 0x76, 0x3d, 0xbd, 0x7c, 0x11, 0x2b, 0x40, 0x78, 0x2a, 0xbb, 0xfb, 0x9a, 0xdf, 0x80, 0xa4, + 0xdd, 0x5e, 0x74, 0x18, 0x69, 0x55, 0xf1, 0xe7, 0x68, 0x50, 0x2f, 0xe2, 0x74, 0x93, 0x43, 0xdd, + 0xe6, 0xa8, 0xa6, 0x0c, 0x6e, 0xba, 0xd0, 0x9b, 0xfd, 0x2b, 0xd9, 0xa5, 0xe1, 0x6f, 0x91, 0x0d, + 0x75, 0xe3, 0x6a, 0x81, 0xd5, 0x07, 0xff, 0xe0, 0xb8, 0x0f, 0x1e, 0x3c, 0xa8, 0x6b, 0xd9, 0x0d, + 0x22, 0x48, 0xa7, 0x85, 0x9f, 0x23, 0x53, 0x4d, 0x53, 0x0c, 0xfb, 0x5a, 0xf4, 0xc3, 0xe3, 0x44, + 0xd5, 0x33, 0x04, 0xe7, 0xb5, 0xb0, 0xa9, 0x6e, 0x82, 0x54, 0x3a, 0xa3, 0x5f, 0x0d, 0xf4, 0x60, + 0x6f, 0xc6, 0x5f, 0x27, 0x42, 0xe2, 0xef, 0x0f, 0xe6, 0xec, 0x1d, 0x37, 0x67, 0xc5, 0xd6, 0x53, + 0x6e, 0x37, 0xb3, 0x41, 0x76, 0x66, 0x3c, 0x41, 0x66, 0x22, 0x21, 0x6d, 0x26, 0x73, 0x75, 0x9c, + 0x09, 0xdd, 0x5d, 0xe7, 0xe2, 0x2b, 0xa5, 0x40, 0x2a, 0xa1, 0xe0, 0xd1, 0xdd, 0xd6, 0xe9, 0xbd, + 0xda, 0x3a, 0xbd, 0xd7, 0x5b, 
0xa7, 0xf7, 0x53, 0xe9, 0x18, 0x77, 0xa5, 0x63, 0xbc, 0x2a, 0x1d, + 0xe3, 0x75, 0xe9, 0x18, 0xbf, 0x97, 0x8e, 0xf1, 0xcb, 0x1f, 0x4e, 0xef, 0xbb, 0xb3, 0x5a, 0xf2, + 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x35, 0xe6, 0xf5, 0xf2, 0x06, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -270,6 +273,13 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x32 + } if len(m.Topology) > 0 { keysForTopology := make([]string, 0, len(m.Topology)) for k := range m.Topology { @@ -355,6 +365,26 @@ func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i-- + if *m.Terminating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Serving != nil { + i-- + if *m.Serving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.Ready != nil { i-- if *m.Ready { @@ -571,6 +601,10 @@ func (m *Endpoint) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -583,6 +617,12 @@ func (m *EndpointConditions) Size() (n int) { if m.Ready != nil { n += 2 } + if m.Serving != nil { + n += 2 + } + if m.Terminating != nil { + n += 2 + } return n } @@ -678,6 +718,7 @@ func (this *Endpoint) String() string { `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, `Topology:` + mapStringForTopology + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") return s @@ -688,6 +729,8 @@ func (this *EndpointConditions) String() string { } s := strings.Join([]string{`&EndpointConditions{`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `Serving:` + valueToStringGenerated(this.Serving) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1031,7 +1074,7 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1042,16 +1085,46 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } m.Topology[mapkey] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1116,16 +1189,55 
@@ func (m *EndpointConditions) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Ready = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Serving = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Terminating = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1294,10 +1406,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1480,10 +1589,7 @@ func (m *EndpointSlice) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1600,10 +1706,7 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index adf10c0e4..e5d21caad 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.discovery.v1beta1; @@ -45,8 +45,8 @@ message Endpoint { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS + // Label (RFC 1123) validation. // +optional optional string hostname = 3; @@ -67,8 +67,15 @@ message Endpoint { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional map topology = 5; + + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. 
This field can be enabled + // with the EndpointSliceNodeName feature gate. + // +optional + optional string nodeName = 6; } // EndpointConditions represents the current condition of an endpoint. @@ -76,9 +83,25 @@ message EndpointConditions { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional optional bool ready = 1; + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool serving = 2; + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + optional bool terminating = 3; } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 5cafea747..e14088e8b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -60,12 +60,6 @@ type EndpointSlice struct { type AddressType string const ( - // AddressTypeIP represents an IP Address. - // This address type has been deprecated and has been replaced by the IPv4 - // and IPv6 adddress types. New resources with this address type will be - // considered invalid. This will be fully removed in 1.18. - // +deprecated - AddressTypeIP = AddressType("IP") // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) // AddressTypeIPv6 represents an IPv6 Address. @@ -88,8 +82,8 @@ type Endpoint struct { // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered - // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) - // validation. + // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS + // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // targetRef is a reference to a Kubernetes object that represents this @@ -108,8 +102,14 @@ type Endpoint struct { // endpoint is located. This should match the corresponding node label. // * topology.kubernetes.io/region: the value indicates the region where the // endpoint is located. This should match the corresponding node label. + // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can + // be used to determine endpoints local to a Node. This field can be enabled + // with the EndpointSliceNodeName feature gate. 
+ // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` } // EndpointConditions represents the current condition of an endpoint. @@ -117,9 +117,25 @@ type EndpointConditions struct { // ready indicates that this endpoint is prepared to receive traffic, // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. + // unknown state as ready. For compatibility reasons, ready should never be + // "true" for terminating endpoints. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` + + // serving is identical to ready except that it is set regardless of the + // terminating state of endpoints. This condition should be set to true for + // a ready endpoint that is terminating. If nil, consumers should defer to + // the ready condition. This field can be enabled with the + // EndpointSliceTerminatingCondition feature gate. + // +optional + Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` + + // terminating indicates that this endpoint is terminating. A nil value + // indicates an unknown state. Consumers should interpret this unknown state + // to mean that the endpoint is not terminating. This field can be enabled + // with the EndpointSliceTerminatingCondition feature gate. + // +optional + Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointPort represents a Port used by an EndpointSlice diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index d67cc7214..d48b93d8b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -31,9 +31,10 @@ var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", "conditions": "conditions contains information about the current status of the endpoint.", - "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", - "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. 
This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", } func (Endpoint) SwaggerDoc() map[string]string { @@ -41,8 +42,10 @@ func (Endpoint) SwaggerDoc() map[string]string { } var map_EndpointConditions = map[string]string{ - "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. 
This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", } func (EndpointConditions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go index 8490ec73f..7076553d2 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -51,6 +51,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { (*out)[key] = val } } + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } return } @@ -72,6 +77,16 @@ func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { *out = new(bool) **out = **in } + if in.Serving != nil { + in, out := &in.Serving, &out.Serving + *out = new(bool) + **out = **in + } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/api/events/v1/generated.pb.go b/vendor/k8s.io/api/events/v1/generated.pb.go index 717137cff..70ad588a6 100644 --- a/vendor/k8s.io/api/events/v1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1/generated.pb.go @@ -1077,10 +1077,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1197,10 +1194,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1302,10 +1296,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto index 18e3d0182..690c99e4c 100644 --- a/vendor/k8s.io/api/events/v1/generated.proto +++ b/vendor/k8s.io/api/events/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.events.v1; @@ -30,8 +30,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "v1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. @@ -43,22 +47,18 @@ message Event { // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. // This field cannot be empty for new Events. 
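// --- Illustrative aside (not part of the vendored patch) ---
// A minimal sketch, assuming the k8s.io/api/discovery/v1beta1 package vendored in the
// hunks above, of how a consumer might read the new Endpoint.NodeName and
// EndpointConditions Serving/Terminating fields: per the field docs, nil Ready is
// treated as ready, nil Terminating as "not terminating", and Serving (when set)
// reports readiness independently of termination. The helper name `usable` is hypothetical.
package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1beta1"
)

// usable reports whether an endpoint should still receive traffic, optionally
// restricted to endpoints local to nodeName (empty string means any node).
func usable(ep discovery.Endpoint, nodeName string) bool {
	if nodeName != "" && (ep.NodeName == nil || *ep.NodeName != nodeName) {
		return false
	}
	if ep.Conditions.Terminating != nil && *ep.Conditions.Terminating {
		// Terminating endpoints never report Ready=true; fall back to Serving.
		return ep.Conditions.Serving != nil && *ep.Conditions.Serving
	}
	// A nil Ready is interpreted as ready, per the field documentation.
	return ep.Conditions.Ready == nil || *ep.Conditions.Ready
}

func main() {
	node, serving, terminating := "node-a", true, true
	ep := discovery.Endpoint{
		Addresses:  []string{"10.0.0.1"},
		Conditions: discovery.EndpointConditions{Serving: &serving, Terminating: &terminating},
		NodeName:   &node,
	}
	fmt.Println(usable(ep, "node-a")) // true: terminating but still serving, local to node-a
	fmt.Println(usable(ep, "node-b")) // false: not local to node-b
}
// --- end aside ---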
- // +optional optional string reportingController = 4; // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. // This field cannot be empty for new Events and it can have at most 128 characters. - // +optional optional string reportingInstance = 5; // action is what action was taken/failed regarding to the regarding object. It is machine-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. optional string action = 6; // reason is why the action was taken. It is human-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. optional string reason = 7; // regarding contains the object this Event is about. In most cases it's an Object reporting controller @@ -80,7 +80,7 @@ message Event { // type is the type of this event (Normal, Warning), new types could be added in the future. // It is machine-readable. - // +optional + // This field cannot be empty for new Events. optional string type = 11; // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go index 07ede5542..4bf715872 100644 --- a/vendor/k8s.io/api/events/v1/types.go +++ b/vendor/k8s.io/api/events/v1/types.go @@ -25,10 +25,15 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // eventTime is the time when this Event was first observed. It is required. EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` @@ -39,22 +44,18 @@ type Event struct { // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. // This field cannot be empty for new Events. - // +optional ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. // This field cannot be empty for new Events and it can have at most 128 characters. - // +optional ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // action is what action was taken/failed regarding to the regarding object. It is machine-readable. - // This field can have at most 128 characters. - // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // reason is why the action was taken. It is human-readable. - // This field can have at most 128 characters. 
- // +optional + // This field cannot be empty for new Events and it can have at most 128 characters. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` // regarding contains the object this Event is about. In most cases it's an Object reporting controller @@ -76,7 +77,7 @@ type Event struct { // type is the type of this event (Normal, Warning), new types could be added in the future. // It is machine-readable. - // +optional + // This field cannot be empty for new Events. Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index e0467436e..7255727bb 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -28,17 +28,17 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "eventTime": "eventTime is the time when this Event was first observed. It is required.", "series": "series is data about the Event series this event represents or nil if it's a singleton Event.", "reportingController": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.", "reportingInstance": "reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.", - "action": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field can have at most 128 characters.", - "reason": "reason is why the action was taken. It is human-readable. This field can have at most 128 characters.", + "action": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.", + "reason": "reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.", "regarding": "regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", "related": "related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", "note": "note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", - "type": "type is the type of this event (Normal, Warning), new types could be added in the future. 
It is machine-readable.", + "type": "type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.", "deprecatedSource": "deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.", "deprecatedFirstTimestamp": "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", "deprecatedLastTimestamp": "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index 3709ef633..d92411bc8 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -1077,10 +1077,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1197,10 +1194,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1302,10 +1296,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index 79bde87c4..90b57d8d0 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.events.v1beta1; @@ -30,8 +30,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "v1beta1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. message Event { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index e2ed214b0..796e56ea7 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -27,10 +27,15 @@ import ( // +k8s:prerelease-lifecycle-gen:deprecated=1.22 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +// Events have a limited retention time and triggers and messages may evolve +// with time. 
Event consumers should not rely on the timing of an event +// with a given Reason reflecting a consistent underlying trigger, or the +// continued existence of events with that Reason. Events should be +// treated as informative, best-effort, supplemental data. type Event struct { metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // eventTime is the time when this Event was first observed. It is required. EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 8c987f899..7f8e162cd 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", "eventTime": "eventTime is the time when this Event was first observed. It is required.", "series": "series is data about the Event series this event represents or nil if it's a singleton Event.", "reportingController": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. 
This field cannot be empty for new Events.", diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index bd37f432c..f1d82c0fd 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -6750,10 +6750,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6835,10 +6832,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6940,10 +6934,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7092,10 +7083,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7306,10 +7294,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7426,10 +7411,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7639,10 +7621,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7898,10 +7877,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8019,10 +7995,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8171,10 +8144,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8418,10 +8388,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8538,10 +8505,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8733,7 +8697,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -8783,10 +8747,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9073,10 +9034,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9294,10 +9252,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9415,10 +9370,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9534,10 +9486,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9685,10 +9634,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9772,10 +9718,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9863,10 +9806,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9954,10 +9894,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10071,10 +10008,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + 
skippy) > l { @@ -10223,10 +10157,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10377,10 +10308,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10497,10 +10425,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10615,10 +10540,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10704,10 +10626,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10894,10 +10813,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10980,10 +10896,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11097,10 +11010,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11216,10 +11126,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11337,10 +11244,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11458,10 +11362,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11578,10 +11479,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11739,10 +11637,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11861,10 +11756,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -12047,10 +11939,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -12166,10 +12055,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -12286,10 +12172,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13045,10 +12928,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13197,10 +13077,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13411,10 +13288,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13531,10 +13405,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13692,10 +13563,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13874,10 +13742,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -13946,10 +13811,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 
{ + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14035,10 +13897,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14160,10 +14019,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14279,10 +14135,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14398,10 +14251,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14516,10 +14366,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14637,10 +14484,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14789,10 +14633,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -14861,10 +14702,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -15043,7 +14881,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -15092,10 +14930,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -15211,10 +15046,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index a81cce680..a4ca5b563 100644 --- 
a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.extensions.v1beta1; diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go index 31b8b5d53..a3d4d0c60 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go index 86c861204..7f0687ac0 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go @@ -2513,10 +2513,7 @@ func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2665,10 +2662,7 @@ func (m *FlowSchema) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2879,10 +2873,7 @@ func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2999,10 +2990,7 @@ func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3174,10 +3162,7 @@ func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3261,10 +3246,7 @@ func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3346,10 +3328,7 @@ func (m *GroupSubject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3467,10 +3446,7 @@ func (m *LimitResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3572,10 +3548,7 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return 
err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3689,10 +3662,7 @@ func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3844,10 +3814,7 @@ func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3996,10 +3963,7 @@ func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4210,10 +4174,7 @@ func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4330,10 +4291,7 @@ func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4415,10 +4373,7 @@ func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4536,10 +4491,7 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4623,10 +4575,7 @@ func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4733,10 +4682,7 @@ func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4934,10 +4880,7 @@ func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5051,10 +4994,7 @@ func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5244,10 +5184,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5329,10 +5266,7 @@ func (m *UserSubject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 0801dd6c1..7b19a273e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.flowcontrol.v1alpha1; diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index a67c6dd02..1e9701fc3 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -51,9 +51,19 @@ const ( FlowSchemaMaxMatchingPrecedence int32 = 10000 ) +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -74,6 +84,10 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -321,6 +335,10 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -340,6 +358,10 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..4152aa2a9 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,121 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchemaList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfigurationList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go new file mode 100644 index 000000000..50897b7eb --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1beta1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1beta1 // import "k8s.io/api/flowcontrol/v1beta1" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go new file mode 100644 index 000000000..cb06fe5e7 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go @@ -0,0 +1,5367 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) 
XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] 
+ n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) 
XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func (*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{15} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{16} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{17} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{18} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m *ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{19} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{20} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_80171c2a4e3669de, []int{21} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaSpec") + 
proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_80171c2a4e3669de) +} + +var fileDescriptor_80171c2a4e3669de = []byte{ + // 1494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcb, 0x73, 0xdb, 0x44, + 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, + 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, + 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, + 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, + 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, + 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x19, 0xae, 0xee, 0x5f, 0xa6, 0x9a, 0xe5, + 0x94, 0xf7, 0xfd, 0x0a, 0xf1, 0x6c, 0xc2, 0x08, 0x2d, 0xb7, 0x88, 0x5d, 0x75, 0xbc, 0xb2, 0x54, + 0x18, 0xae, 0x55, 0xde, 0x6b, 0x38, 0x07, 0xa6, 0x63, 0x33, 0xcf, 0x69, 0x94, 0x5b, 0xe7, 0x2a, + 0x84, 0x19, 0xe7, 0xca, 0x35, 0x62, 0x13, 0xcf, 0x60, 0xa4, 0xaa, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, + 0x0c, 0xec, 0x35, 0xc3, 0xb5, 0xb4, 0x2e, 0x7b, 0x4d, 0xda, 0xaf, 0x9e, 0xa9, 0x59, 0xac, 0xee, + 0x57, 0x34, 0xd3, 0x69, 0x96, 0x6b, 0x4e, 0xcd, 0x29, 0x0b, 0xb7, 0x8a, 0xbf, 0x27, 0xbe, 0xc4, + 0x87, 0xf8, 0x15, 0xc0, 0xad, 0x5e, 0x88, 0xc3, 0x37, 0x0d, 0xb3, 0x6e, 0xd9, 0xc4, 0x3b, 0x2c, + 0xbb, 0xfb, 0x35, 0x2e, 0xa0, 0xe5, 0x26, 0x61, 0x46, 0xb9, 0xd5, 0x97, 0xc4, 0x6a, 0x79, 0x98, + 0x97, 0xe7, 0xdb, 0xcc, 0x6a, 0x92, 0x3e, 
0x87, 0xb7, 0xd2, 0x1c, 0xa8, 0x59, 0x27, 0x4d, 0xa3, + 0xd7, 0x4f, 0xbd, 0x03, 0x2b, 0x1f, 0x37, 0x9c, 0x83, 0x2b, 0x16, 0x65, 0x96, 0x5d, 0xf3, 0x2d, + 0x5a, 0x27, 0xde, 0x16, 0x61, 0x75, 0xa7, 0x8a, 0x3e, 0x80, 0x2c, 0x3b, 0x74, 0x49, 0x41, 0x59, + 0x53, 0x5e, 0xcb, 0xeb, 0xa7, 0x1e, 0xb5, 0x4b, 0x13, 0x9d, 0x76, 0x29, 0x7b, 0xeb, 0xd0, 0x25, + 0xcf, 0xda, 0xa5, 0xe3, 0x43, 0xdc, 0xb8, 0x1a, 0x0b, 0x47, 0xf5, 0xfb, 0x0c, 0x00, 0xb7, 0xda, + 0x15, 0xa1, 0xd1, 0x3d, 0x98, 0xe1, 0xe5, 0x56, 0x0d, 0x66, 0x08, 0xcc, 0xd9, 0xf3, 0x67, 0xb5, + 0xb8, 0xd7, 0x51, 0xd6, 0x9a, 0xbb, 0x5f, 0xe3, 0x02, 0xaa, 0x71, 0x6b, 0xad, 0x75, 0x4e, 0xbb, + 0x51, 0xb9, 0x4f, 0x4c, 0xb6, 0x45, 0x98, 0xa1, 0x23, 0x99, 0x05, 0xc4, 0x32, 0x1c, 0xa1, 0xa2, + 0x1d, 0xc8, 0x52, 0x97, 0x98, 0x85, 0x8c, 0x40, 0xd7, 0xb4, 0xd1, 0x27, 0xa9, 0xc5, 0xb9, 0xed, + 0xba, 0xc4, 0xd4, 0xe7, 0xc2, 0x0a, 0xf9, 0x17, 0x16, 0x48, 0xe8, 0x0b, 0x98, 0xa2, 0xcc, 0x60, + 0x3e, 0x2d, 0x4c, 0xf6, 0x65, 0x9c, 0x86, 0x29, 0xfc, 0xf4, 0x05, 0x89, 0x3a, 0x15, 0x7c, 0x63, + 0x89, 0xa7, 0x3e, 0xc9, 0xc0, 0x72, 0x6c, 0xbc, 0xe1, 0xd8, 0x55, 0x8b, 0x59, 0x8e, 0x8d, 0xde, + 0x4d, 0x74, 0xfd, 0x64, 0x4f, 0xd7, 0x57, 0x06, 0xb8, 0xc4, 0x1d, 0x47, 0x6f, 0x47, 0xe9, 0x66, + 0x84, 0xfb, 0x89, 0x64, 0xf0, 0x67, 0xed, 0xd2, 0x62, 0xe4, 0x96, 0xcc, 0x07, 0xb5, 0x00, 0x35, + 0x0c, 0xca, 0x6e, 0x79, 0x86, 0x4d, 0x03, 0x58, 0xab, 0x49, 0x64, 0xd5, 0x6f, 0x8c, 0x77, 0x4e, + 0xdc, 0x43, 0x5f, 0x95, 0x21, 0xd1, 0x66, 0x1f, 0x1a, 0x1e, 0x10, 0x01, 0xbd, 0x0a, 0x53, 0x1e, + 0x31, 0xa8, 0x63, 0x17, 0xb2, 0x22, 0xe5, 0xa8, 0x5f, 0x58, 0x48, 0xb1, 0xd4, 0xa2, 0xd7, 0x61, + 0xba, 0x49, 0x28, 0x35, 0x6a, 0xa4, 0x90, 0x13, 0x86, 0x8b, 0xd2, 0x70, 0x7a, 0x2b, 0x10, 0xe3, + 0x50, 0xaf, 0xfe, 0xa2, 0xc0, 0x42, 0xdc, 0xa7, 0x4d, 0x8b, 0x32, 0x74, 0xb7, 0x8f, 0x7b, 0xda, + 0x78, 0x35, 0x71, 0x6f, 0xc1, 0xbc, 0x25, 0x19, 0x6e, 0x26, 0x94, 0x74, 0xf1, 0xee, 0x06, 0xe4, + 0x2c, 0x46, 0x9a, 0xbc, 0xeb, 0x93, 0x3d, 0xed, 0x4a, 0x21, 0x89, 0x3e, 0x2f, 0x61, 0x73, 0xd7, + 0x38, 0x00, 0x0e, 0x70, 0xd4, 0xbf, 0x26, 0xbb, 0x2b, 0xe0, 0x7c, 0x44, 0x3f, 0x29, 0xb0, 0xea, + 0x7a, 0x96, 0xe3, 0x59, 0xec, 0x70, 0x93, 0xb4, 0x48, 0x63, 0xc3, 0xb1, 0xf7, 0xac, 0x9a, 0xef, + 0x19, 0xbc, 0x95, 0xb2, 0xa8, 0x8d, 0xb4, 0xc8, 0x3b, 0x43, 0x11, 0x30, 0xd9, 0x23, 0x1e, 0xb1, + 0x4d, 0xa2, 0xab, 0x32, 0xa5, 0xd5, 0x11, 0xc6, 0x23, 0x52, 0x41, 0x9f, 0x02, 0x6a, 0x1a, 0x8c, + 0x77, 0xb4, 0xb6, 0xe3, 0x11, 0x93, 0x54, 0x39, 0xaa, 0x20, 0x64, 0x2e, 0x66, 0xc7, 0x56, 0x9f, + 0x05, 0x1e, 0xe0, 0x85, 0xbe, 0x55, 0x60, 0xb9, 0xda, 0x3f, 0x64, 0x24, 0x2f, 0x2f, 0x8d, 0xd3, + 0xe8, 0x01, 0x33, 0x4a, 0x5f, 0xe9, 0xb4, 0x4b, 0xcb, 0x03, 0x14, 0x78, 0x50, 0x30, 0x74, 0x17, + 0x72, 0x9e, 0xdf, 0x20, 0xb4, 0x90, 0x15, 0xc7, 0x9b, 0x1a, 0x75, 0xc7, 0x69, 0x58, 0xe6, 0x21, + 0xe6, 0x2e, 0x9f, 0x5b, 0xac, 0xbe, 0xeb, 0x8b, 0x59, 0x45, 0xe3, 0xb3, 0x16, 0x2a, 0x1c, 0x80, + 0xaa, 0x0f, 0x61, 0xa9, 0x77, 0x68, 0xa0, 0x1a, 0x80, 0x19, 0xde, 0x53, 0x5a, 0x50, 0x44, 0xd8, + 0x37, 0xc7, 0x67, 0x55, 0x74, 0xc7, 0xe3, 0x79, 0x19, 0x89, 0x28, 0xee, 0x82, 0x56, 0xcf, 0xc2, + 0xdc, 0x55, 0xcf, 0xf1, 0x5d, 0x99, 0x23, 0x5a, 0x83, 0xac, 0x6d, 0x34, 0xc3, 0xe9, 0x13, 0x4d, + 0xc4, 0x6d, 0xa3, 0x49, 0xb0, 0xd0, 0xa8, 0x3f, 0x2a, 0x30, 0xbf, 0x69, 0x35, 0x2d, 0x86, 0x09, + 0x75, 0x1d, 0x9b, 0x12, 0x74, 0x31, 0x31, 0xb1, 0x4e, 0xf4, 0x4c, 0xac, 0x23, 0x09, 0xe3, 0xae, + 0x59, 0xf5, 0x25, 0x4c, 0x3f, 0xf0, 0x89, 0x6f, 0xd9, 0x35, 0x39, 0xaf, 0x2f, 0xa4, 0x15, 0x78, + 0x33, 0x30, 0x4f, 0xb0, 0x4d, 0x9f, 0xe5, 0x23, 0x40, 0x6a, 0x70, 
0x88, 0xa8, 0xfe, 0xad, 0xc0, + 0x09, 0x11, 0x98, 0x54, 0x87, 0xb3, 0x18, 0xdd, 0x85, 0x82, 0x41, 0xa9, 0xef, 0x91, 0xea, 0x86, + 0x63, 0x9b, 0xbe, 0xc7, 0xf9, 0x7f, 0xb8, 0x5b, 0x37, 0x3c, 0x42, 0x45, 0x35, 0x39, 0x7d, 0x4d, + 0x56, 0x53, 0x58, 0x1f, 0x62, 0x87, 0x87, 0x22, 0xa0, 0xfb, 0x30, 0xdf, 0xe8, 0xae, 0x5d, 0x96, + 0x79, 0x26, 0xad, 0xcc, 0x44, 0xc3, 0xf4, 0x63, 0x32, 0x83, 0x64, 0xd3, 0x71, 0x12, 0x5a, 0x3d, + 0x80, 0x63, 0xdb, 0xfc, 0x0e, 0x53, 0xc7, 0xf7, 0x4c, 0x12, 0x13, 0x10, 0x95, 0x20, 0xd7, 0x22, + 0x5e, 0x25, 0x20, 0x51, 0x5e, 0xcf, 0x73, 0xfa, 0x7d, 0xc6, 0x05, 0x38, 0x90, 0xa3, 0xf7, 0x60, + 0xd1, 0x8e, 0x3d, 0x6f, 0xe3, 0x4d, 0x5a, 0x98, 0x12, 0xa6, 0xcb, 0x9d, 0x76, 0x69, 0x71, 0x3b, + 0xa9, 0xc2, 0xbd, 0xb6, 0x6a, 0x3b, 0x03, 0x2b, 0x43, 0xf8, 0x8e, 0x6e, 0xc3, 0x0c, 0x95, 0xbf, + 0x25, 0x87, 0x4f, 0xa6, 0xd5, 0x2e, 0x7d, 0xe3, 0x69, 0x1b, 0x82, 0xe1, 0x08, 0x0a, 0x39, 0x30, + 0xef, 0xc9, 0x14, 0x44, 0x4c, 0x39, 0x75, 0xcf, 0xa7, 0x61, 0xf7, 0x77, 0x27, 0x6e, 0x2e, 0xee, + 0x06, 0xc4, 0x49, 0x7c, 0xf4, 0x10, 0x96, 0xba, 0xca, 0x0e, 0x62, 0x4e, 0x8a, 0x98, 0x17, 0xd3, + 0x62, 0x0e, 0x3c, 0x14, 0xbd, 0x20, 0xc3, 0x2e, 0x6d, 0xf7, 0xc0, 0xe2, 0xbe, 0x40, 0xea, 0x6f, + 0x19, 0x18, 0x31, 0x88, 0x5f, 0xc2, 0x52, 0x75, 0x2f, 0xb1, 0x54, 0xbd, 0xff, 0xfc, 0x2f, 0xcc, + 0xd0, 0x25, 0xab, 0xde, 0xb3, 0x64, 0x7d, 0xf8, 0x02, 0x31, 0x46, 0x2f, 0x5d, 0xff, 0x64, 0xe0, + 0x95, 0xe1, 0xce, 0xf1, 0x12, 0x76, 0x3d, 0x31, 0xd2, 0x2e, 0xf5, 0x8c, 0xb4, 0x93, 0x63, 0x40, + 0xfc, 0xbf, 0x94, 0xf5, 0x2c, 0x65, 0xbf, 0x2b, 0x50, 0x1c, 0xde, 0xb7, 0x97, 0xb0, 0xa4, 0x7d, + 0x95, 0x5c, 0xd2, 0xde, 0x79, 0x7e, 0x92, 0x0d, 0x59, 0xda, 0xae, 0x8e, 0xe2, 0x56, 0xb4, 0x5e, + 0x8d, 0xf1, 0xc4, 0xfe, 0x3a, 0xb2, 0x55, 0x62, 0x1b, 0x4c, 0xf9, 0x97, 0x90, 0xf0, 0xfe, 0xc8, + 0x36, 0x2a, 0x0d, 0xd2, 0x24, 0x36, 0x93, 0x84, 0xac, 0xc3, 0x74, 0x23, 0x78, 0x1b, 0xe5, 0xa5, + 0x5e, 0x1f, 0xeb, 0x49, 0x1a, 0xf5, 0x94, 0x06, 0xcf, 0xb0, 0x34, 0xc3, 0x21, 0xbc, 0xfa, 0x83, + 0x02, 0x6b, 0x69, 0x97, 0x15, 0x1d, 0x0c, 0x58, 0x76, 0x5e, 0x60, 0x91, 0x1d, 0x7f, 0xf9, 0xf9, + 0x59, 0x81, 0xa3, 0x83, 0x76, 0x0a, 0x4e, 0x7f, 0xbe, 0x48, 0x44, 0x5b, 0x40, 0x44, 0xff, 0x9b, + 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x86, 0x99, 0xba, 0x61, 0x57, 0x77, 0xad, 0xaf, 0xc3, 0xfd, 0x36, + 0x22, 0xe0, 0x27, 0x52, 0x8e, 0x23, 0x0b, 0x74, 0x05, 0x96, 0x84, 0xdf, 0x26, 0xb1, 0x6b, 0xac, + 0x2e, 0x7a, 0x25, 0xae, 0x72, 0x2e, 0x7e, 0x0f, 0x6e, 0xf6, 0xe8, 0x71, 0x9f, 0x87, 0xfa, 0xaf, + 0x02, 0xe8, 0x79, 0xde, 0xf9, 0x53, 0x90, 0x37, 0x5c, 0x4b, 0x2c, 0x7b, 0xc1, 0x15, 0xc8, 0xeb, + 0xf3, 0x9d, 0x76, 0x29, 0xbf, 0xbe, 0x73, 0x2d, 0x10, 0xe2, 0x58, 0xcf, 0x8d, 0xc3, 0x27, 0x30, + 0x78, 0xea, 0xa4, 0x71, 0x18, 0x98, 0xe2, 0x58, 0x8f, 0x2e, 0xc3, 0x9c, 0xd9, 0xf0, 0x29, 0x23, + 0xde, 0xae, 0xe9, 0xb8, 0x44, 0x8c, 0x8c, 0x19, 0xfd, 0xa8, 0xac, 0x69, 0x6e, 0xa3, 0x4b, 0x87, + 0x13, 0x96, 0x48, 0x03, 0xe0, 0x84, 0xa7, 0xae, 0xc1, 0xe3, 0xe4, 0x44, 0x9c, 0x05, 0x7e, 0x60, + 0xdb, 0x91, 0x14, 0x77, 0x59, 0xa8, 0xf7, 0xe1, 0xd8, 0x2e, 0xf1, 0x5a, 0x96, 0x49, 0xd6, 0x4d, + 0xd3, 0xf1, 0x6d, 0x16, 0xae, 0xad, 0x65, 0xc8, 0x47, 0x66, 0xf2, 0x4e, 0x1c, 0x91, 0xf1, 0xf3, + 0x11, 0x16, 0x8e, 0x6d, 0xa2, 0x4b, 0x98, 0x19, 0x7e, 0x09, 0x33, 0x30, 0x1d, 0xc3, 0x67, 0xf7, + 0x2d, 0xbb, 0x2a, 0x91, 0x8f, 0x87, 0xd6, 0xd7, 0x2d, 0xbb, 0xfa, 0xac, 0x5d, 0x9a, 0x95, 0x66, + 0xfc, 0x13, 0x0b, 0x43, 0x74, 0x0d, 0xb2, 0x3e, 0x25, 0x9e, 0xbc, 0x5e, 0xa7, 0xd2, 0xc8, 0x7c, + 0x9b, 0x12, 0x2f, 0xdc, 0x7c, 0x66, 0x38, 0x32, 0x17, 0x60, 0x01, 0x81, 0xb6, 0x20, 0x57, 
0xe3, + 0x87, 0x22, 0xa7, 0xfe, 0xe9, 0x34, 0xac, 0xee, 0x75, 0x3e, 0xa0, 0x81, 0x90, 0xe0, 0x00, 0x05, + 0x3d, 0x80, 0x05, 0x9a, 0x68, 0xa1, 0x38, 0xae, 0x31, 0x36, 0x99, 0x81, 0x8d, 0xd7, 0x51, 0xa7, + 0x5d, 0x5a, 0x48, 0xaa, 0x70, 0x4f, 0x00, 0xb5, 0x0c, 0xb3, 0x5d, 0x05, 0xa6, 0xcf, 0x3f, 0xfd, + 0xcc, 0xa3, 0xa7, 0xc5, 0x89, 0xc7, 0x4f, 0x8b, 0x13, 0x4f, 0x9e, 0x16, 0x27, 0xbe, 0xe9, 0x14, + 0x95, 0x47, 0x9d, 0xa2, 0xf2, 0xb8, 0x53, 0x54, 0x9e, 0x74, 0x8a, 0xca, 0x1f, 0x9d, 0xa2, 0xf2, + 0xdd, 0x9f, 0xc5, 0x89, 0x3b, 0xd3, 0x32, 0xb3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3c, 0xd4, + 0x36, 0xaf, 0xfa, 0x13, 0x00, 0x00, +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + 
i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + 
for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += 
strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + 
repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + 
return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex 
+ default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto new file mode 100644 index 
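Aside (illustrative, not part of the vendored files): every generated Unmarshal method above follows the same pattern, read a base-128 varint for the field tag, split it into field number and wire type, decode the payload, and fall back to skipGenerated for unknown fields. A minimal, self-contained sketch of that varint/tag decoding step, with hypothetical helper names:

package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes a base-128 varint the same way the generated code
// does: seven payload bits per byte, continuation bit set on all but the
// last byte, with an overflow guard at 64 bits.
func readUvarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// 0x08 0x96 0x01 encodes field 1 (wire type 0 = varint) with value 150.
	data := []byte{0x08, 0x96, 0x01}
	tag, next, _ := readUvarint(data, 0)
	fieldNum, wireType := tag>>3, tag&0x7
	val, _, _ := readUvarint(data, next)
	fmt.Println(fieldNum, wireType, val) // 1 0 150
}

Length-delimited fields (wire type 2) work the same way: the varint gives the payload length, and the postIndex checks in the generated code above guard against truncated or negative lengths.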
000000000..9ddfc5465 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -0,0 +1,434 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. 
+ repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. 
This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". 
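Aside (illustrative, not part of the vendored files): the assured-concurrency formula quoted in the LimitedPriorityLevelConfiguration comment, ACV(l) = ceil( SCL * ACS(l) / sum over k of ACS(k) ), can be checked with a few hypothetical numbers:

package main

import (
	"fmt"
	"math"
)

// acv applies ACV(l) = ceil(SCL * ACS(l) / sum(ACS)) across priority levels.
func acv(scl int, shares map[string]int) map[string]int {
	sum := 0
	for _, s := range shares {
		sum += s
	}
	out := make(map[string]int, len(shares))
	for name, s := range shares {
		out[name] = int(math.Ceil(float64(scl*s) / float64(sum)))
	}
	return out
}

func main() {
	// Hypothetical server concurrency limit of 600, one level at the default
	// assuredConcurrencyShares of 30 and one at 90.
	fmt.Println(acv(600, map[string]int{"level-a": 30, "level-b": 90}))
	// map[level-a:150 level-b:450]
}

As the comment says, raising one level's shares reserves more concurrency for it at the expense of every other priority level, since all shares compete for the same server concurrency limit.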
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. 
Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. 
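Aside (illustrative, not part of the vendored files): the `queues`/`handSize` comments above describe shuffle sharding, where the request's flow identifier is hashed and the hash is used to deal a small hand of queues, from which one of the shortest is chosen. A toy sketch of the hand-dealing idea (the hash choice and dealing details here are assumptions, not the apiserver's actual implementation):

package main

import (
	"fmt"
	"hash/fnv"
)

// dealHand hashes a flow identifier and uses successive "digits" of the
// hash to pick handSize distinct queues out of queues, mirroring the
// behaviour described for QueuingConfiguration.
func dealHand(flowID string, queues, handSize int) []int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	v := h.Sum64()

	remaining := make([]int, queues)
	for i := range remaining {
		remaining[i] = i
	}
	hand := make([]int, 0, handSize)
	for i := 0; i < handSize; i++ {
		idx := int(v % uint64(len(remaining)))
		v /= uint64(len(remaining))
		hand = append(hand, remaining[idx])
		remaining = append(remaining[:idx], remaining[idx+1:]...)
	}
	return hand
}

func main() {
	// With the documented defaults (64 queues, hand size 8), each flow gets
	// its own small, stable subset of queues.
	fmt.Println(dealHand("user-a/namespace-1", 64, 8))
	fmt.Println(dealHand("user-b/namespace-2", 64, 8))
}

Because each flow only ever lands in its own hand of queues, a few heavy flows cannot saturate most of the queues, which is the rationale the field comments give for keeping `handSize` significantly smaller than `queues`.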
+message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // Required + // +unionDiscriminator + optional string kind = 1; + + // +optional + optional UserSubject user = 2; + + // +optional + optional GroupSubject group = 3; + + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/register.go b/vendor/k8s.io/api/flowcontrol/v1beta1/register.go new file mode 100644 index 000000000..e78549314 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go new file mode 100644 index 000000000..ece834e92 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -0,0 +1,529 @@ +/* +Copyright 2019 The Kubernetes Authors. 
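Aside (illustrative, not part of the vendored files): register.go above wires these types into a runtime.Scheme through SchemeBuilder/AddToScheme. A short sketch of how a consumer of the vendored k8s.io/api/flowcontrol/v1beta1 package might install the group and construct one of the new objects (the object name and referenced priority level below are made-up values):

package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Install flowcontrol.apiserver.k8s.io/v1beta1 into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := flowcontrolv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	fs := &flowcontrolv1beta1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: flowcontrolv1beta1.FlowSchemaSpec{
			PriorityLevelConfiguration: flowcontrolv1beta1.PriorityLevelConfigurationReference{
				Name: "workload-low",
			},
			MatchingPrecedence: 1000,
		},
	}

	// The scheme can now report the object's group/version/kind.
	gvks, _, err := scheme.ObjectKinds(fs)
	fmt.Println(gvks, err)
}

This mirrors what addKnownTypes does for the four registered kinds (FlowSchema, FlowSchemaList, PriorityLevelConfiguration, PriorityLevelConfigurationList); any client built on this scheme can then encode and decode those objects.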
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. 
+type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. 
This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +type Subject struct { + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. 
A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) at least one member of +// namespaces matches the request. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of URL prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource URLs. If it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema.
+ // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. 
A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be executing at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`.
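+ // Editorial note (not part of the upstream k8s.io/api source): purely for illustration,
+ // a PriorityLevelConfiguration that exercises this field might carry a spec such as
+ //
+ //   type: Limited
+ //   limited:
+ //     assuredConcurrencyShares: 30
+ //     limitResponse:
+ //       type: Queue
+ //       queuing:
+ //         queues: 64
+ //         handSize: 8
+ //         queueLengthLimit: 50
+ //
+ // The field names follow the JSON tags declared in this file and the values shown are the
+ // documented defaults; this is a hedged sketch, not authoritative API documentation.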
+ // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shuffle sharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..8343a883f --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. 
Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of URL prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource URLs. If it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. 
A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shuffle sharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) at least one member of namespaces matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry.
Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "Required", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..c8f6e2306 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,541 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. 
+func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..d0f229718 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,93 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 23 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 26 +} diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 4e03b5438..49238823b 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -2723,10 +2723,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2810,10 +2807,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2927,10 +2921,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3079,10 +3070,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3204,10 +3192,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3323,10 +3308,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3443,10 +3425,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3564,10 +3543,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3684,10 +3660,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3802,10 +3775,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3891,10 +3861,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4009,10 +3976,7 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4199,10 +4163,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4285,10 +4246,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4402,10 +4360,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4521,10 +4476,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4642,10 +4594,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4763,10 +4712,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4883,10 +4829,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if 
(skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5044,10 +4987,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5166,10 +5106,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5352,10 +5289,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5456,10 +5390,7 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index a98ef94c8..74737098b 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.networking.v1; diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 6f51df864..fa921b619 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -1606,10 +1606,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1693,10 +1690,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1845,10 +1839,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1999,10 +1990,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2118,10 +2106,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2238,10 +2223,7 @@ func (m *IngressClassList) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2359,10 +2341,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2479,10 +2458,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2597,10 +2573,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2686,10 +2659,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2876,10 +2846,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2962,10 +2929,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3079,10 +3043,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index a97c318db..251bbafec 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.networking.v1beta1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1/doc.go similarity index 82% rename from vendor/k8s.io/api/settings/v1alpha1/doc.go rename to vendor/k8s.io/api/node/v1/doc.go index 60066bb6d..12cbcb8a0 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/node/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,6 @@ limitations under the License. 
// +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +groupName=settings.k8s.io +// +groupName=node.k8s.io -package v1alpha1 // import "k8s.io/api/settings/v1alpha1" +package v1 // import "k8s.io/api/node/v1" diff --git a/vendor/k8s.io/api/node/v1/generated.pb.go b/vendor/k8s.io/api/node/v1/generated.pb.go new file mode 100644 index 000000000..d930f63b2 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/generated.pb.go @@ -0,0 +1,1399 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_6ac9be560e26ae98, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1.Overhead.PodFixedEntry") + 
proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto", fileDescriptor_6ac9be560e26ae98) +} + +var fileDescriptor_6ac9be560e26ae98 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xce, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x94, 0xa3, 0x43, 0x14, 0x21, 0x27, 0xca, 0x14, 0x90, + 0x7a, 0x6e, 0x2b, 0x84, 0x2a, 0x18, 0x90, 0x0c, 0xad, 0x40, 0x82, 0x02, 0x2e, 0x2c, 0x88, 0x81, + 0x8b, 0xfd, 0x70, 0xdc, 0xc4, 0xbe, 0xe8, 0x7c, 0x8e, 0xc8, 0x86, 0x58, 0x90, 0x98, 0xfa, 0x5f, + 0x18, 0xf8, 0x0b, 0x15, 0x53, 0xc7, 0x4e, 0x2d, 0x0d, 0xff, 0x82, 0x09, 0xdd, 0xd9, 0x4e, 0x5c, + 0x12, 0x42, 0xd9, 0xee, 0xde, 0x7d, 0xdf, 0xf7, 0xde, 0xfb, 0xde, 0x3d, 0x7c, 0xbf, 0xbb, 0x1d, + 0x51, 0x9f, 0x9b, 0xdd, 0xb8, 0x0d, 0x22, 0x04, 0x09, 0x91, 0x39, 0x80, 0xd0, 0xe5, 0xc2, 0x4c, + 0x1f, 0x58, 0xdf, 0x37, 0x43, 0xee, 0x82, 0x39, 0xd8, 0x34, 0x3d, 0x08, 0x41, 0x30, 0x09, 0x2e, + 0xed, 0x0b, 0x2e, 0x39, 0x21, 0x09, 0x86, 0xb2, 0xbe, 0x4f, 0x15, 0x86, 0x0e, 0x36, 0x6b, 0xeb, + 0x9e, 0x2f, 0x3b, 0x71, 0x9b, 0x3a, 0x3c, 0x30, 0x3d, 0xee, 0x71, 0x53, 0x43, 0xdb, 0xf1, 0x7b, + 0x7d, 0xd3, 0x17, 0x7d, 0x4a, 0x24, 0x6a, 0xcd, 0x5c, 0x1a, 0x87, 0x8b, 0x59, 0x69, 0x6a, 0x77, + 0x26, 0x98, 0x80, 0x39, 0x1d, 0x3f, 0x04, 0x31, 0x34, 0xfb, 0x5d, 0x4f, 0x93, 0x04, 0x44, 0x3c, + 0x16, 0x0e, 0xfc, 0x17, 0x2b, 0x32, 0x03, 0x90, 0x6c, 0x56, 0x2e, 0xf3, 0x6f, 0x2c, 0x11, 0x87, + 0xd2, 0x0f, 0xa6, 0xd3, 0xdc, 0xfd, 0x17, 0x21, 0x72, 0x3a, 0x10, 0xb0, 0x3f, 0x79, 0xcd, 0xef, + 0x45, 0x5c, 0x7a, 0x3e, 0x00, 0xd1, 0x01, 0xe6, 0x92, 0x63, 0x84, 0x4b, 0x7d, 0xee, 0xee, 0xfa, + 0x1f, 0xc0, 0xad, 0xa2, 0xc6, 0x42, 0xab, 0xbc, 0x75, 0x9b, 0x4e, 0x9b, 0x4b, 0x33, 0x02, 0x7d, + 0x91, 0x82, 0x77, 0x42, 0x29, 0x86, 0xd6, 0x67, 0x74, 0x74, 0x5a, 0x2f, 0x8c, 0x4e, 0xeb, 0xa5, + 0x2c, 0xfe, 0xeb, 0xb4, 0x5e, 0x9f, 0x76, 0x96, 0xda, 0xa9, 0x59, 0x4f, 0xfd, 0x48, 0x7e, 0x3a, + 0x9b, 0x0b, 0xd9, 0x63, 0x01, 0x7c, 0x39, 0xab, 0xaf, 0x5f, 0xc6, 0x7b, 0xfa, 0x32, 0x66, 0xa1, + 0xf4, 0xe5, 0xd0, 0x1e, 0x77, 0x51, 0xeb, 0xe2, 0x95, 0x0b, 0x45, 0x92, 0x55, 0xbc, 0xd0, 0x85, + 0x61, 0x15, 0x35, 0x50, 0x6b, 0xd9, 0x56, 0x47, 0xf2, 0x08, 0x2f, 0x0e, 0x58, 0x2f, 0x86, 0x6a, + 0xb1, 0x81, 0x5a, 0xe5, 0x2d, 0x9a, 0xeb, 0x78, 0x9c, 0x8b, 0xf6, 0xbb, 0x9e, 0xb6, 0x60, 0x3a, + 0x57, 0x42, 0xbe, 0x57, 0xdc, 0x46, 0xcd, 0xaf, 0x45, 0x5c, 0xb1, 0x13, 0xbf, 0x1f, 0xf6, 0x58, + 0x14, 0x91, 0x77, 0xb8, 0xa4, 0x26, 0xec, 0x32, 0xc9, 0x74, 0xc6, 0xf2, 0xd6, 0xc6, 0x3c, 0xf5, + 0x88, 0x2a, 0xb4, 0x76, 0xb8, 0x7d, 0x00, 0x8e, 0x7c, 0x06, 0x92, 0x59, 0x24, 0x35, 0x15, 0x4f, + 0x62, 0xf6, 0x58, 0x95, 0xdc, 0xc2, 0x4b, 0x1d, 0x16, 0xba, 0x3d, 0x10, 0xba, 0xfc, 0x65, 0xeb, + 0x5a, 0x0a, 0x5f, 0x7a, 0x9c, 0x84, 0xed, 0xec, 0x9d, 0xec, 0xe2, 0x12, 0x4f, 0x07, 0x57, 0x5d, + 0xd0, 0xc5, 0xdc, 0x9c, 0x37, 0x5c, 0xab, 0xa2, 0x26, 0x99, 0xdd, 0xec, 0x31, 0x97, 0xec, 0x61, + 0xac, 0x3e, 0x93, 0x1b, 0xf7, 0xfc, 0xd0, 0xab, 0x5e, 0xd1, 0x4a, 0xc6, 0x2c, 0xa5, 0xfd, 0x31, + 0xca, 0xba, 0xaa, 0x1a, 0x98, 0xdc, 0xed, 0x9c, 0x42, 0xf3, 0x1b, 0xc2, 0xab, 0x79, 0xd7, 0xd4, + 0xaf, 0x20, 0x6f, 0xa7, 0x9c, 0xa3, 
0x97, 0x73, 0x4e, 0xb1, 0xb5, 0x6f, 0xab, 0xd9, 0x67, 0xcc, + 0x22, 0x39, 0xd7, 0x76, 0xf0, 0xa2, 0x2f, 0x21, 0x88, 0xaa, 0x45, 0xfd, 0xc9, 0x1b, 0xb3, 0xaa, + 0xcf, 0x97, 0x64, 0xad, 0xa4, 0x62, 0x8b, 0x4f, 0x14, 0xcd, 0x4e, 0xd8, 0xcd, 0xc3, 0x22, 0xce, + 0x35, 0x45, 0x0e, 0x70, 0x45, 0x91, 0xf7, 0xa1, 0x07, 0x8e, 0xe4, 0x22, 0xdd, 0xa0, 0x8d, 0xf9, + 0xd6, 0xd0, 0xbd, 0x1c, 0x25, 0xd9, 0xa3, 0xb5, 0x34, 0x59, 0x25, 0xff, 0x64, 0x5f, 0xd0, 0x26, + 0xaf, 0x71, 0x59, 0xf2, 0x9e, 0x5a, 0x65, 0x9f, 0x87, 0x59, 0x1f, 0x17, 0xa6, 0xa0, 0x36, 0x49, + 0xa5, 0x7a, 0x35, 0x86, 0x59, 0x37, 0x52, 0xe1, 0xf2, 0x24, 0x16, 0xd9, 0x79, 0x9d, 0xda, 0x03, + 0x7c, 0x7d, 0xaa, 0x9e, 0x19, 0x2b, 0xb3, 0x96, 0x5f, 0x99, 0xe5, 0xdc, 0x0a, 0x58, 0xad, 0xa3, + 0x73, 0xa3, 0x70, 0x7c, 0x6e, 0x14, 0x4e, 0xce, 0x8d, 0xc2, 0xc7, 0x91, 0x81, 0x8e, 0x46, 0x06, + 0x3a, 0x1e, 0x19, 0xe8, 0x64, 0x64, 0xa0, 0x1f, 0x23, 0x03, 0x1d, 0xfe, 0x34, 0x0a, 0x6f, 0x8a, + 0x83, 0xcd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x40, 0xe0, 0x08, 0xf3, 0x05, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, 
this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } 
else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
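The generated Unmarshal methods in this file all follow the same hand-rolled protobuf wire-format pattern: read a varint key, split it into a field number (key >> 3) and a wire type (key & 0x7), then consume a length-delimited payload for strings, maps, and embedded messages. The following is a minimal, self-contained sketch of that key/length step only; it is illustrative and not part of the vendored generated code. The helper name readUvarint and the sample bytes are made up for the example, while the 0x12 tag mirrors the framing the generated code uses for RuntimeClass.Handler (field 2, length-delimited).

package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes a base-128 varint, mirroring the shift/0x7F loops
// used throughout the generated Unmarshal functions.
func readUvarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// Field 2, wire type 2 (length-delimited): key = 2<<3 | 2 = 0x12,
	// followed by a 4-byte payload.
	msg := []byte{0x12, 0x04, 'r', 'u', 'n', 'c'}

	key, n, err := readUvarint(msg)
	if err != nil {
		panic(err)
	}
	fieldNum := key >> 3  // 2
	wireType := key & 0x7 // 2 = length-delimited

	length, m, err := readUvarint(msg[n:])
	if err != nil {
		panic(err)
	}
	payload := msg[n+m : n+m+int(length)]

	fmt.Printf("field=%d wiretype=%d payload=%q\n", fieldNum, wireType, payload)
	// Output: field=2 wiretype=2 payload="runc"
}

The consolidated bounds check introduced throughout this patch, if (skippy < 0) || (iNdEx+skippy) < 0, guards the same decode loop: a negative skip length or an index overflow both map to ErrInvalidLengthGenerated before the offset is advanced.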
+func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto new file mode 100644 index 000000000..4a86999f1 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -0,0 +1,109 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package k8s.io.api.node.v1; + +import "k8s.io/api/core/v1/generated.proto"; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; + +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1; +} + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://kubernetes.io/docs/concepts/containers/runtime-class/ +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. + optional string handler = 2; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ + // This field is in beta starting v1.18 + // and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 3; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 4; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission.
+ // +optional + map<string, string> nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1/register.go b/vendor/k8s.io/api/node/v1/register.go new file mode 100644 index 000000000..d514e9b5a --- /dev/null +++ b/vendor/k8s.io/api/node/v1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go new file mode 100644 index 000000000..b32cc36c4 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/types.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod.
RuntimeClasses are manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://kubernetes.io/docs/concepts/containers/runtime-class/ +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ + // This field is in beta starting v1.18 + // and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. 
+type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..c68c40e90 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/\nThis field is in beta starting v1.18 and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go similarity index 54% rename from vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go index ed6c31a32..35084da7e 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go @@ -18,34 +18,66 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPreset) DeepCopyInto(out *PodPreset) { +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset. -func (in *PodPreset) DeepCopy() *PodPreset { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { if in == nil { return nil } - out := new(PodPreset) + out := new(RuntimeClass) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPreset) DeepCopyObject() runtime.Object { +func (in *RuntimeClass) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -53,13 +85,13 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) + *out = make([]RuntimeClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -67,18 +99,18 @@ func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList. -func (in *PodPresetList) DeepCopy() *PodPresetList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { if in == nil { return nil } - out := new(PodPresetList) + out := new(RuntimeClassList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPresetList) DeepCopyObject() runtime.Object { +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -86,33 +118,18 @@ func (in *PodPresetList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { +func (in *Scheduling) DeepCopyInto(out *Scheduling) { *out = *in - in.Selector.DeepCopyInto(&out.Selector) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -120,12 +137,12 @@ func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec. -func (in *PodPresetSpec) DeepCopy() *PodPresetSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { if in == nil { return nil } - out := new(PodPresetSpec) + out := new(Scheduling) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index e6658a96f..abd2c09b6 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -852,7 +852,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -869,10 +869,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -988,10 +985,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1108,10 +1102,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1265,10 +1256,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1428,7 +1416,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1479,10 
+1467,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index ac05f839e..310ad490b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.node.v1alpha1; @@ -78,8 +78,8 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. + // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // requirements, and is immutable. optional string runtimeHandler = 1; // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index b59767107..03e7e6f33 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -56,8 +56,8 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. + // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 390000172..d3011466b 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -58,7 +58,7 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index b85cbd295..4bfdd5df3 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -767,7 +767,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -784,10 +784,7 @@ func (m *Overhead) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -974,10 +971,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1094,10 +1088,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1257,7 +1248,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -1308,10 +1299,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 49166798d..5c2d9d50a 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.node.v1beta1; @@ -57,8 +57,8 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. optional string handler = 2; // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index 1d2b96312..89559949c 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -48,8 +48,8 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. + // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` // Overhead represents the resource overhead associated with running a pod for a diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 681f73f23..a486147f0 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 40ec7ef7f..e91b497cb 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -2540,10 +2540,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2625,10 +2622,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2730,10 +2724,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2852,10 +2843,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2971,10 +2959,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3062,10 +3047,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3153,10 +3135,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3305,10 +3284,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3425,10 +3401,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3586,10 +3559,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3770,7 +3740,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3863,10 +3833,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3982,10 +3949,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4102,10 +4066,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4861,10 +4822,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4980,10 +4938,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5099,10 +5054,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5217,10 +5169,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5338,10 +5287,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5457,10 +5403,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 30db726f9..18a1c6578 100644 --- 
a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.policy.v1beta1; diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index ba6872d62..678c00512 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -1557,10 +1557,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1713,10 +1710,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1866,10 +1860,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1986,10 +1977,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2106,10 +2094,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2319,10 +2304,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2439,10 +2421,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2592,10 +2571,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2712,10 +2688,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2832,10 +2805,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2981,10 +2951,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { 
if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3162,10 +3129,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 71fa08341..22c6dae4b 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 3b12526da..94c1bef8b 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -1558,10 +1558,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1714,10 +1711,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1867,10 +1861,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1987,10 +1978,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2107,10 +2095,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2320,10 +2305,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2440,10 +2422,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2593,10 +2572,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2713,10 
+2689,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2833,10 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2982,10 +2952,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3163,10 +3130,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 60354e6a6..caed7ec30 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1alpha1; diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 53d36320e..ad5d7cb05 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -1557,10 +1557,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1713,10 +1710,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1866,10 +1860,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1986,10 +1977,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2106,10 +2094,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2319,10 +2304,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2439,10 +2421,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2592,10 +2571,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2712,10 +2688,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2832,10 +2805,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2981,10 +2951,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3162,10 +3129,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 44cd6c24a..17d3741f0 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.rbac.v1beta1; diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index efc3102ef..c5ef2f50e 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -511,10 +511,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -631,10 +628,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index e7489f539..1f9a7474a 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 8a62104db..16f3c7cb4 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -511,10 +511,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -631,10 +628,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 8c4a2c460..da27a13e7 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1alpha1; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index b89af56b3..64b1c1505 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -511,10 +511,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -631,10 +628,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index eae3c01f3..99bdaabee 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.scheduling.v1beta1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go deleted file mode 100644 index 7ed066d31..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1053 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{0} -} -func (m *PodPreset) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPreset) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPreset.Merge(m, src) -} -func (m *PodPreset) XXX_Size() int { - return m.Size() -} -func (m *PodPreset) XXX_DiscardUnknown() { - xxx_messageInfo_PodPreset.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPreset proto.InternalMessageInfo - -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{1} -} -func (m *PodPresetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetList.Merge(m, src) -} -func (m *PodPresetList) XXX_Size() int { - return m.Size() -} -func (m *PodPresetList) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetList proto.InternalMessageInfo - -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{2} -} -func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetSpec.Merge(m, src) -} -func (m *PodPresetSpec) XXX_Size() int { - return m.Size() -} -func (m *PodPresetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo - -func init() { - proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") - proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") - 
proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) -} - -var fileDescriptor_48fab0a6ea4b79ce = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 
0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} - -func (m *PodPreset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodPresetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.EnvFrom) > 0 { - for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - 
i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PodPreset) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodPresetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodPresetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PodPreset) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodPreset{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnv += "}" - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnvFrom += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts += 
fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeMounts += "}" - s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PodPreset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodPreset{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, v11.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, v11.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto deleted file mode 100644 index db1ec9312..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.settings.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -message PodPreset { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // +optional - optional PodPresetSpec spec = 2; -} - -// PodPresetList is a list of PodPreset objects. -message PodPresetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated PodPreset items = 2; -} - -// PodPresetSpec is a description of a pod preset. -message PodPresetSpec { - // Selector is a label query over a set of resources, in this case pods. - // Required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // Env defines the collection of EnvVar to inject into containers. 
- // +optional - repeated k8s.io.api.core.v1.EnvVar env = 2; - - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3; - - // Volumes defines the collection of Volume to inject into the pod. - // +optional - repeated k8s.io.api.core.v1.Volume volumes = 4; - - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; -} - diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go deleted file mode 100644 index 8cc99d440..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -type PodPreset struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // +optional - Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodPresetSpec is a description of a pod preset. -type PodPresetSpec struct { - // Selector is a label query over a set of resources, in this case pods. - // Required. - Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - - // Env defines the collection of EnvVar to inject into containers. - // +optional - Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` - // Volumes defines the collection of Volume to inject into the pod. - // +optional - Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPresetList is a list of PodPreset objects. -type PodPresetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. 
- Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 0501e0af3..000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PodPreset = map[string]string{ - "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", -} - -func (PodPreset) SwaggerDoc() map[string]string { - return map_PodPreset -} - -var map_PodPresetList = map[string]string{ - "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (PodPresetList) SwaggerDoc() map[string]string { - return map_PodPresetList -} - -var map_PodPresetSpec = map[string]string{ - "": "PodPresetSpec is a description of a pod preset.", - "selector": "Selector is a label query over a set of resources, in this case pods. 
Required.", - "env": "Env defines the collection of EnvVar to inject into containers.", - "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", - "volumes": "Volumes defines the collection of Volume to inject into the pod.", - "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", -} - -func (PodPresetSpec) SwaggerDoc() map[string]string { - return map_PodPresetSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 2c7088c38..34a3c34dc 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -298,10 +298,38 @@ func (m *StorageClassList) XXX_DiscardUnknown() { var xxx_messageInfo_StorageClassList proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{9} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} + return fileDescriptor_3b530c1983504d8d, []int{10} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +357,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} + return fileDescriptor_3b530c1983504d8d, []int{11} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} + return fileDescriptor_3b530c1983504d8d, []int{12} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} + return fileDescriptor_3b530c1983504d8d, []int{13} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = 
VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} + return fileDescriptor_3b530c1983504d8d, []int{14} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +469,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} + return fileDescriptor_3b530c1983504d8d, []int{15} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +497,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} + return fileDescriptor_3b530c1983504d8d, []int{16} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -505,6 +533,7 @@ func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1.TokenRequest") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") @@ -520,95 +549,101 @@ func init() { } var fileDescriptor_3b530c1983504d8d = []byte{ - // 1395 bytes of a gzipped FileDescriptorProto + // 1500 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x76, 0x7e, 0x8c, 0x93, 0xc6, 0x99, 0xe4, 0xfb, 0xfd, 0xfa, 0x9b, 0x83, 0x37, - 0x5a, 0x2a, 0x08, 0x85, 0xae, 0x9b, 0x52, 0xaa, 0xaa, 0x52, 0x91, 0xe2, 0xc4, 0xa5, 0x11, 0x71, - 0x12, 0x8d, 0x4b, 0x85, 0x10, 0x20, 0x26, 0xbb, 0x13, 0x67, 0x1b, 0xef, 0xce, 0x76, 0x77, 0x6c, - 0xf0, 0x8d, 0x13, 0x37, 0x24, 0xb8, 0xf2, 0x57, 0x80, 0x04, 0x17, 0x8e, 0x9c, 0xca, 0xad, 0xe2, - 0xd4, 0xd3, 0x8a, 0x2e, 0x67, 0xb8, 0x71, 0xc9, 0x09, 0xcd, 0xec, 0xd8, 0xfb, 0xc3, 0xeb, 0x34, - 0xbd, 0xe4, 0xe6, 0x79, 0xef, 0x7d, 0x3e, 0xef, 0xbd, 0x79, 0x3f, 0x66, 0x0d, 0xde, 0x3b, 0xbd, - 0xe3, 0xeb, 0x16, 0xad, 0x9f, 0xf6, 0x8e, 0x88, 0xe7, 0x10, 0x46, 0xfc, 0x7a, 0x9f, 0x38, 0x26, - 0xf5, 0xea, 0x52, 0x81, 0x5d, 0xab, 0xee, 0x33, 0xea, 0xe1, 0x0e, 0xa9, 0xf7, 0x37, 0xeb, 0x1d, - 0xe2, 0x10, 0x0f, 0x33, 0x62, 0xea, 0xae, 0x47, 0x19, 0x85, 0xff, 0x89, 0xcc, 0x74, 0xec, 0x5a, - 0xba, 0x34, 0xd3, 0xfb, 0x9b, 0x6b, 0xd7, 0x3b, 0x16, 0x3b, 0xe9, 0x1d, 0xe9, 0x06, 0xb5, 0xeb, - 0x1d, 0xda, 0xa1, 0x75, 0x61, 0x7d, 0xd4, 0x3b, 0x16, 0x27, 0x71, 0x10, 0xbf, 0x22, 0x96, 0x35, - 0x2d, 0xe1, 0xcc, 0xa0, 0x5e, 0x9e, 0xa7, 0xb5, 0x5b, 0xb1, 0x8d, 0x8d, 0x8d, 0x13, 0xcb, 0x21, - 0xde, 0xa0, 0xee, 0x9e, 0x76, 0xb8, 0xc0, 0xaf, 0xdb, 0x84, 0xe1, 0x3c, 0x54, 0x7d, 0x12, 0xca, - 0xeb, 0x39, 0xcc, 0xb2, 0xc9, 0x18, 0xe0, 0xf6, 0xcb, 0x00, 0xbe, 0x71, 0x42, 0x6c, 0x9c, 0xc5, - 0x69, 0x3f, 0x2b, 0x60, 0x7e, 0xbb, 0xbd, 0xbb, 
0xe3, 0x59, 0x7d, 0xe2, 0xc1, 0xcf, 0xc1, 0x1c, - 0x8f, 0xc8, 0xc4, 0x0c, 0x57, 0x95, 0x75, 0x65, 0xa3, 0x7c, 0xf3, 0x86, 0x1e, 0xdf, 0xd4, 0x88, - 0x58, 0x77, 0x4f, 0x3b, 0x5c, 0xe0, 0xeb, 0xdc, 0x5a, 0xef, 0x6f, 0xea, 0x07, 0x47, 0x8f, 0x89, - 0xc1, 0x5a, 0x84, 0xe1, 0x06, 0x7c, 0x1a, 0xa8, 0x53, 0x61, 0xa0, 0x82, 0x58, 0x86, 0x46, 0xac, - 0xf0, 0x3e, 0x28, 0xfa, 0x2e, 0x31, 0xaa, 0xd3, 0x82, 0xfd, 0xaa, 0x9e, 0x5b, 0x07, 0x7d, 0x14, - 0x51, 0xdb, 0x25, 0x46, 0x63, 0x41, 0x32, 0x16, 0xf9, 0x09, 0x09, 0xbc, 0xf6, 0x93, 0x02, 0x16, - 0x47, 0x56, 0x7b, 0x96, 0xcf, 0xe0, 0x27, 0x63, 0xb1, 0xeb, 0x17, 0x8b, 0x9d, 0xa3, 0x45, 0xe4, - 0x15, 0xe9, 0x67, 0x6e, 0x28, 0x49, 0xc4, 0xdd, 0x04, 0x25, 0x8b, 0x11, 0xdb, 0xaf, 0x4e, 0xaf, - 0x17, 0x36, 0xca, 0x37, 0xd7, 0x5f, 0x16, 0x78, 0x63, 0x51, 0x92, 0x95, 0x76, 0x39, 0x0c, 0x45, - 0x68, 0xed, 0x9f, 0xe9, 0x44, 0xd8, 0x3c, 0x1d, 0x78, 0x17, 0x5c, 0xc1, 0x8c, 0x61, 0xe3, 0x04, - 0x91, 0x27, 0x3d, 0xcb, 0x23, 0xa6, 0x08, 0x7e, 0xae, 0x01, 0xc3, 0x40, 0xbd, 0xb2, 0x95, 0xd2, - 0xa0, 0x8c, 0x25, 0xc7, 0xba, 0xd4, 0xdc, 0x75, 0x8e, 0xe9, 0x81, 0xd3, 0xa2, 0x3d, 0x87, 0x89, - 0x6b, 0x95, 0xd8, 0xc3, 0x94, 0x06, 0x65, 0x2c, 0xa1, 0x01, 0x56, 0xfb, 0xb4, 0xdb, 0xb3, 0xc9, - 0x9e, 0x75, 0x4c, 0x8c, 0x81, 0xd1, 0x25, 0x2d, 0x6a, 0x12, 0xbf, 0x5a, 0x58, 0x2f, 0x6c, 0xcc, - 0x37, 0xea, 0x61, 0xa0, 0xae, 0x3e, 0xca, 0xd1, 0x9f, 0x05, 0xea, 0x4a, 0x8e, 0x1c, 0xe5, 0x92, - 0xc1, 0x7b, 0x60, 0x49, 0x5e, 0xce, 0x36, 0x76, 0xb1, 0x61, 0xb1, 0x41, 0xb5, 0x28, 0x22, 0x5c, - 0x09, 0x03, 0x75, 0xa9, 0x9d, 0x56, 0xa1, 0xac, 0x2d, 0x7c, 0x00, 0x16, 0x8f, 0xfd, 0xf7, 0x3d, - 0xda, 0x73, 0x0f, 0x69, 0xd7, 0x32, 0x06, 0xd5, 0xd2, 0xba, 0xb2, 0x31, 0xdf, 0xd0, 0xc2, 0x40, - 0x5d, 0xbc, 0xdf, 0x4e, 0x28, 0xce, 0xb2, 0x02, 0x94, 0x06, 0x6a, 0x3f, 0x2a, 0x60, 0x76, 0xbb, - 0xbd, 0xbb, 0x4f, 0x4d, 0x72, 0x09, 0x4d, 0xbe, 0x93, 0x6a, 0x72, 0x6d, 0x72, 0xaf, 0xf0, 0x78, - 0x26, 0xb6, 0xf8, 0xdf, 0x51, 0x8b, 0x73, 0x1b, 0x39, 0x9e, 0xeb, 0xa0, 0xe8, 0x60, 0x9b, 0x88, - 0xa8, 0xe7, 0x63, 0xcc, 0x3e, 0xb6, 0x09, 0x12, 0x1a, 0xf8, 0x3a, 0x98, 0x71, 0xa8, 0x49, 0x76, - 0x77, 0x84, 0xef, 0xf9, 0xc6, 0x15, 0x69, 0x33, 0xb3, 0x2f, 0xa4, 0x48, 0x6a, 0xe1, 0x2d, 0xb0, - 0xc0, 0xa8, 0x4b, 0xbb, 0xb4, 0x33, 0xf8, 0x80, 0x0c, 0x86, 0x55, 0xaf, 0x84, 0x81, 0xba, 0xf0, - 0x30, 0x21, 0x47, 0x29, 0x2b, 0xf8, 0x29, 0x28, 0xe3, 0x6e, 0x97, 0x1a, 0x98, 0xe1, 0xa3, 0x2e, - 0x11, 0xa5, 0x2c, 0xdf, 0xbc, 0x36, 0x21, 0xbd, 0xa8, 0x4b, 0xb8, 0x5f, 0x44, 0x7c, 0xda, 0xf3, - 0x0c, 0xe2, 0x37, 0x96, 0xc2, 0x40, 0x2d, 0x6f, 0xc5, 0x14, 0x28, 0xc9, 0xa7, 0xfd, 0xa0, 0x80, - 0xb2, 0x4c, 0xf8, 0x12, 0x26, 0x7a, 0x3b, 0x3d, 0xd1, 0xb5, 0xf3, 0xab, 0x34, 0x61, 0x9e, 0x3f, - 0x1b, 0x45, 0x2c, 0x86, 0xf9, 0x00, 0xcc, 0x9a, 0xa2, 0x54, 0x7e, 0x55, 0x11, 0xac, 0x57, 0xcf, - 0x67, 0x95, 0xbb, 0x62, 0x49, 0x72, 0xcf, 0x46, 0x67, 0x1f, 0x0d, 0x59, 0xb4, 0x6f, 0x66, 0xc0, - 0xc2, 0x70, 0x4c, 0xba, 0xd8, 0xf7, 0x2f, 0xa1, 0x79, 0xdf, 0x05, 0x65, 0xd7, 0xa3, 0x7d, 0xcb, - 0xb7, 0xa8, 0x43, 0x3c, 0xd9, 0x47, 0x2b, 0x12, 0x52, 0x3e, 0x8c, 0x55, 0x28, 0x69, 0x07, 0x3b, - 0x00, 0xb8, 0xd8, 0xc3, 0x36, 0x61, 0x3c, 0xfb, 0x82, 0xc8, 0xfe, 0x9d, 0x09, 0xd9, 0x27, 0x33, - 0xd2, 0x0f, 0x47, 0xa8, 0xa6, 0xc3, 0xbc, 0x41, 0x1c, 0x5d, 0xac, 0x40, 0x09, 0x6a, 0x78, 0x0a, - 0x16, 0x3d, 0x62, 0x74, 0xb1, 0x65, 0xcb, 0xa5, 0x50, 0x14, 0x11, 0x36, 0xf9, 0x52, 0x40, 0x49, - 0xc5, 0x59, 0xa0, 0xde, 0x18, 0x7f, 0xa0, 0xf5, 0x43, 0xe2, 0xf9, 0x96, 0xcf, 0x88, 0xc3, 0xa2, - 0x0e, 0x4d, 0x61, 0x50, 0x9a, 0x9b, 0xcf, 0x89, 0xcd, 0xd7, 0xe5, 0x81, 
0xcb, 0x2c, 0xea, 0xf8, - 0xd5, 0x52, 0x3c, 0x27, 0xad, 0x84, 0x1c, 0xa5, 0xac, 0xe0, 0x1e, 0x58, 0xe5, 0x7d, 0xfd, 0x45, - 0xe4, 0xa0, 0xf9, 0xa5, 0x8b, 0x1d, 0x7e, 0x4b, 0xd5, 0x19, 0xb1, 0xfb, 0xaa, 0x7c, 0xb7, 0x6e, - 0xe5, 0xe8, 0x51, 0x2e, 0x0a, 0x7e, 0x04, 0x96, 0xa3, 0xe5, 0xda, 0xb0, 0x1c, 0xd3, 0x72, 0x3a, - 0x7c, 0xb5, 0x56, 0x67, 0x45, 0xd2, 0xd7, 0xc2, 0x40, 0x5d, 0x7e, 0x94, 0x55, 0x9e, 0xe5, 0x09, - 0xd1, 0x38, 0x09, 0x7c, 0x02, 0x96, 0x85, 0x47, 0x62, 0xca, 0xa1, 0xb7, 0x88, 0x5f, 0x9d, 0x13, - 0xa5, 0xdb, 0x48, 0x96, 0x8e, 0x5f, 0x1d, 0xaf, 0xdb, 0x70, 0x35, 0xb4, 0x49, 0x97, 0x18, 0x8c, - 0x7a, 0x0f, 0x89, 0x67, 0x37, 0xfe, 0x2f, 0xeb, 0xb5, 0xbc, 0x95, 0xa5, 0x42, 0xe3, 0xec, 0x6b, - 0xf7, 0xc0, 0x52, 0xa6, 0xe0, 0xb0, 0x02, 0x0a, 0xa7, 0x64, 0x10, 0x2d, 0x35, 0xc4, 0x7f, 0xc2, - 0x55, 0x50, 0xea, 0xe3, 0x6e, 0x8f, 0x44, 0xcd, 0x87, 0xa2, 0xc3, 0xdd, 0xe9, 0x3b, 0x8a, 0xf6, - 0x8b, 0x02, 0x2a, 0xc9, 0xee, 0xb9, 0x84, 0x3d, 0xf1, 0x20, 0xbd, 0x27, 0x5e, 0xbb, 0x40, 0x4f, - 0x4f, 0x58, 0x16, 0xdf, 0x4f, 0x83, 0x4a, 0x54, 0x97, 0xe8, 0x5d, 0xb7, 0x89, 0xc3, 0x2e, 0x61, - 0xa0, 0x5b, 0xa9, 0xd7, 0xe8, 0xad, 0x73, 0xd7, 0x75, 0x1c, 0xd8, 0xa4, 0x67, 0x09, 0x7e, 0x08, - 0x66, 0x7c, 0x86, 0x59, 0x8f, 0x0f, 0x39, 0x27, 0xbc, 0x7e, 0x51, 0x42, 0x01, 0x8a, 0x5f, 0xa4, - 0xe8, 0x8c, 0x24, 0x99, 0xf6, 0xab, 0x02, 0x56, 0xb3, 0x90, 0x4b, 0xa8, 0xee, 0x5e, 0xba, 0xba, - 0x6f, 0x5c, 0x30, 0x99, 0x09, 0x15, 0xfe, 0x5d, 0x01, 0xff, 0x1d, 0xcb, 0x5b, 0xbc, 0x7d, 0x7c, - 0x27, 0xb8, 0x99, 0xcd, 0xb3, 0x1f, 0xbf, 0xe5, 0x62, 0x27, 0x1c, 0xe6, 0xe8, 0x51, 0x2e, 0x0a, - 0x3e, 0x06, 0x15, 0xcb, 0xe9, 0x5a, 0x0e, 0x89, 0x64, 0xed, 0xb8, 0xbe, 0xb9, 0x83, 0x9b, 0x65, - 0x16, 0xc5, 0x5d, 0x0d, 0x03, 0xb5, 0xb2, 0x9b, 0x61, 0x41, 0x63, 0xbc, 0xda, 0x6f, 0x39, 0x95, - 0x11, 0xaf, 0xdd, 0xdb, 0x60, 0x2e, 0xfa, 0x20, 0x25, 0x9e, 0x4c, 0x63, 0x74, 0xd3, 0x5b, 0x52, - 0x8e, 0x46, 0x16, 0xa2, 0x6f, 0xc4, 0x55, 0xc8, 0x40, 0x2f, 0xdc, 0x37, 0x02, 0x94, 0xe8, 0x1b, - 0x71, 0x46, 0x92, 0x8c, 0x07, 0xc1, 0xbf, 0x69, 0xc4, 0x5d, 0x16, 0xd2, 0x41, 0xec, 0x4b, 0x39, - 0x1a, 0x59, 0x68, 0x7f, 0x15, 0x72, 0x0a, 0x24, 0x1a, 0x30, 0x91, 0xcd, 0xf0, 0x13, 0x3c, 0x9b, - 0x8d, 0x39, 0xca, 0xc6, 0x84, 0xdf, 0x29, 0x00, 0xe2, 0x11, 0x45, 0x6b, 0xd8, 0xa0, 0x51, 0x17, - 0x35, 0x5f, 0x69, 0x24, 0xf4, 0xad, 0x31, 0x9e, 0xe8, 0x25, 0x5c, 0x93, 0xfe, 0xe1, 0xb8, 0x01, - 0xca, 0x71, 0x0e, 0x4d, 0x50, 0x8e, 0xa4, 0x4d, 0xcf, 0xa3, 0x9e, 0x1c, 0x4f, 0xed, 0xdc, 0x58, - 0x84, 0x65, 0xa3, 0x26, 0x3e, 0xcb, 0x62, 0xe8, 0x59, 0xa0, 0x96, 0x13, 0x7a, 0x94, 0xa4, 0xe5, - 0x5e, 0x4c, 0x12, 0x7b, 0x29, 0xbe, 0x9a, 0x97, 0x1d, 0x32, 0xd9, 0x4b, 0x82, 0x76, 0xad, 0x09, - 0xfe, 0x37, 0xe1, 0x5a, 0x5e, 0xe9, 0xbd, 0xf8, 0x5a, 0x01, 0x49, 0x1f, 0x70, 0x0f, 0x14, 0xf9, - 0xbf, 0x61, 0xb9, 0x48, 0xae, 0x5d, 0x6c, 0x91, 0x3c, 0xb4, 0x6c, 0x12, 0xaf, 0x42, 0x7e, 0x42, - 0x82, 0x05, 0xbe, 0x09, 0x66, 0x6d, 0xe2, 0xfb, 0xb8, 0x23, 0x3d, 0xc7, 0x1f, 0x72, 0xad, 0x48, - 0x8c, 0x86, 0x7a, 0xed, 0x36, 0x58, 0xc9, 0xf9, 0x20, 0x86, 0x2a, 0x28, 0x19, 0xe2, 0x8f, 0x1b, - 0x0f, 0xa8, 0xd4, 0x98, 0xe7, 0x1b, 0x65, 0x5b, 0xfc, 0x5f, 0x8b, 0xe4, 0x8d, 0x8d, 0xa7, 0x2f, - 0x6a, 0x53, 0xcf, 0x5e, 0xd4, 0xa6, 0x9e, 0xbf, 0xa8, 0x4d, 0x7d, 0x15, 0xd6, 0x94, 0xa7, 0x61, - 0x4d, 0x79, 0x16, 0xd6, 0x94, 0xe7, 0x61, 0x4d, 0xf9, 0x23, 0xac, 0x29, 0xdf, 0xfe, 0x59, 0x9b, - 0xfa, 0x78, 0xba, 0xbf, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x83, 0x24, 0x44, 0x13, - 0x11, 0x00, 0x00, + 0x17, 0xcf, 0xc6, 0xce, 0xaf, 0x71, 0xd2, 0x24, 0x93, 0xf4, 0xfb, 0x35, 0x39, 
0xd8, 0xd1, 0xb6, + 0x82, 0x50, 0xe8, 0xba, 0x29, 0xa5, 0xaa, 0x2a, 0x15, 0x29, 0x9b, 0xb8, 0x34, 0x22, 0xbf, 0x34, + 0x0e, 0x15, 0x42, 0x80, 0x3a, 0xd9, 0x9d, 0x38, 0x5b, 0x7b, 0x77, 0xb6, 0x3b, 0x63, 0x53, 0xdf, + 0xe0, 0xc2, 0x0d, 0x09, 0xae, 0x88, 0x3f, 0x02, 0x24, 0xb8, 0x70, 0xe4, 0x54, 0x6e, 0x15, 0xa7, + 0x9e, 0x2c, 0x6a, 0xce, 0xf0, 0x07, 0xe4, 0x84, 0x66, 0x76, 0xec, 0xfd, 0xe1, 0x75, 0x9a, 0x5e, + 0x72, 0xf3, 0xbe, 0x1f, 0x9f, 0xf7, 0x66, 0xde, 0x7b, 0x9f, 0x37, 0x06, 0x1f, 0x34, 0xee, 0x30, + 0xc3, 0xa1, 0x95, 0x46, 0xeb, 0x88, 0x04, 0x1e, 0xe1, 0x84, 0x55, 0xda, 0xc4, 0xb3, 0x69, 0x50, + 0x51, 0x0a, 0xec, 0x3b, 0x15, 0xc6, 0x69, 0x80, 0xeb, 0xa4, 0xd2, 0x5e, 0xaf, 0xd4, 0x89, 0x47, + 0x02, 0xcc, 0x89, 0x6d, 0xf8, 0x01, 0xe5, 0x14, 0x5e, 0x0e, 0xcd, 0x0c, 0xec, 0x3b, 0x86, 0x32, + 0x33, 0xda, 0xeb, 0x2b, 0xd7, 0xeb, 0x0e, 0x3f, 0x69, 0x1d, 0x19, 0x16, 0x75, 0x2b, 0x75, 0x5a, + 0xa7, 0x15, 0x69, 0x7d, 0xd4, 0x3a, 0x96, 0x5f, 0xf2, 0x43, 0xfe, 0x0a, 0x51, 0x56, 0xf4, 0x58, + 0x30, 0x8b, 0x06, 0x59, 0x91, 0x56, 0x6e, 0x45, 0x36, 0x2e, 0xb6, 0x4e, 0x1c, 0x8f, 0x04, 0x9d, + 0x8a, 0xdf, 0xa8, 0x0b, 0x01, 0xab, 0xb8, 0x84, 0xe3, 0x2c, 0xaf, 0xca, 0x28, 0xaf, 0xa0, 0xe5, + 0x71, 0xc7, 0x25, 0x43, 0x0e, 0xb7, 0x5f, 0xe5, 0xc0, 0xac, 0x13, 0xe2, 0xe2, 0xb4, 0x9f, 0xfe, + 0xab, 0x06, 0x66, 0x36, 0x6b, 0xdb, 0x5b, 0x81, 0xd3, 0x26, 0x01, 0x7c, 0x04, 0xa6, 0x45, 0x46, + 0x36, 0xe6, 0xb8, 0xa8, 0xad, 0x6a, 0x6b, 0x85, 0x9b, 0x37, 0x8c, 0xe8, 0xa6, 0x06, 0xc0, 0x86, + 0xdf, 0xa8, 0x0b, 0x01, 0x33, 0x84, 0xb5, 0xd1, 0x5e, 0x37, 0xf6, 0x8f, 0x1e, 0x13, 0x8b, 0xef, + 0x12, 0x8e, 0x4d, 0xf8, 0xac, 0x5b, 0x1e, 0xeb, 0x75, 0xcb, 0x20, 0x92, 0xa1, 0x01, 0x2a, 0xbc, + 0x0f, 0xf2, 0xcc, 0x27, 0x56, 0x71, 0x5c, 0xa2, 0x5f, 0x35, 0x32, 0xeb, 0x60, 0x0c, 0x32, 0xaa, + 0xf9, 0xc4, 0x32, 0x67, 0x15, 0x62, 0x5e, 0x7c, 0x21, 0xe9, 0xaf, 0xff, 0xa2, 0x81, 0xb9, 0x81, + 0xd5, 0x8e, 0xc3, 0x38, 0xfc, 0x6c, 0x28, 0x77, 0xe3, 0x7c, 0xb9, 0x0b, 0x6f, 0x99, 0xf9, 0x82, + 0x8a, 0x33, 0xdd, 0x97, 0xc4, 0xf2, 0xae, 0x82, 0x09, 0x87, 0x13, 0x97, 0x15, 0xc7, 0x57, 0x73, + 0x6b, 0x85, 0x9b, 0xab, 0xaf, 0x4a, 0xdc, 0x9c, 0x53, 0x60, 0x13, 0xdb, 0xc2, 0x0d, 0x85, 0xde, + 0xfa, 0x8f, 0xf9, 0x58, 0xda, 0xe2, 0x38, 0xf0, 0x2e, 0xb8, 0x84, 0x39, 0xc7, 0xd6, 0x09, 0x22, + 0x4f, 0x5a, 0x4e, 0x40, 0x6c, 0x99, 0xfc, 0xb4, 0x09, 0x7b, 0xdd, 0xf2, 0xa5, 0x8d, 0x84, 0x06, + 0xa5, 0x2c, 0x85, 0xaf, 0x4f, 0xed, 0x6d, 0xef, 0x98, 0xee, 0x7b, 0xbb, 0xb4, 0xe5, 0x71, 0x79, + 0xad, 0xca, 0xf7, 0x20, 0xa1, 0x41, 0x29, 0x4b, 0x68, 0x81, 0xe5, 0x36, 0x6d, 0xb6, 0x5c, 0xb2, + 0xe3, 0x1c, 0x13, 0xab, 0x63, 0x35, 0xc9, 0x2e, 0xb5, 0x09, 0x2b, 0xe6, 0x56, 0x73, 0x6b, 0x33, + 0x66, 0xa5, 0xd7, 0x2d, 0x2f, 0x3f, 0xcc, 0xd0, 0x9f, 0x76, 0xcb, 0x4b, 0x19, 0x72, 0x94, 0x09, + 0x06, 0xef, 0x81, 0x79, 0x75, 0x39, 0x9b, 0xd8, 0xc7, 0x96, 0xc3, 0x3b, 0xc5, 0xbc, 0xcc, 0x70, + 0xa9, 0xd7, 0x2d, 0xcf, 0xd7, 0x92, 0x2a, 0x94, 0xb6, 0x85, 0x0f, 0xc0, 0xdc, 0x31, 0xfb, 0x30, + 0xa0, 0x2d, 0xff, 0x80, 0x36, 0x1d, 0xab, 0x53, 0x9c, 0x58, 0xd5, 0xd6, 0x66, 0x4c, 0xbd, 0xd7, + 0x2d, 0xcf, 0xdd, 0xaf, 0xc5, 0x14, 0xa7, 0x69, 0x01, 0x4a, 0x3a, 0xc2, 0x47, 0x60, 0x8e, 0xd3, + 0x06, 0xf1, 0xc4, 0xd5, 0x11, 0xc6, 0x59, 0x71, 0x52, 0x96, 0xf1, 0xca, 0x88, 0x32, 0x1e, 0xc6, + 0x6c, 0xcd, 0xcb, 0xaa, 0x92, 0x73, 0x71, 0x29, 0x43, 0x49, 0x40, 0xb8, 0x09, 0x16, 0x83, 0xb0, + 0x2e, 0x0c, 0x11, 0xbf, 0x75, 0xd4, 0x74, 0xd8, 0x49, 0x71, 0x4a, 0x1e, 0xf6, 0x72, 0xaf, 0x5b, + 0x5e, 0x44, 0x69, 0x25, 0x1a, 0xb6, 0xd7, 0x7f, 0xd6, 0xc0, 0xd4, 0x66, 0x6d, 0x7b, 0x8f, 0xda, + 0xe4, 
0x02, 0x66, 0x71, 0x2b, 0x31, 0x8b, 0xfa, 0xe8, 0x96, 0x16, 0xf9, 0x8c, 0x9c, 0xc4, 0x7f, + 0xc3, 0x49, 0x14, 0x36, 0x8a, 0x45, 0x56, 0x41, 0xde, 0xc3, 0x2e, 0x91, 0x59, 0xcf, 0x44, 0x3e, + 0x7b, 0xd8, 0x25, 0x48, 0x6a, 0xe0, 0x9b, 0x60, 0xd2, 0xa3, 0x36, 0xd9, 0xde, 0x92, 0xb1, 0x67, + 0xcc, 0x4b, 0xca, 0x66, 0x72, 0x4f, 0x4a, 0x91, 0xd2, 0xc2, 0x5b, 0x60, 0x96, 0x53, 0x9f, 0x36, + 0x69, 0xbd, 0xf3, 0x11, 0xe9, 0xf4, 0x9b, 0x73, 0xa1, 0xd7, 0x2d, 0xcf, 0x1e, 0xc6, 0xe4, 0x28, + 0x61, 0x05, 0x3f, 0x07, 0x05, 0xdc, 0x6c, 0x52, 0x0b, 0x73, 0x7c, 0xd4, 0x24, 0xb2, 0xe3, 0x0a, + 0x37, 0xaf, 0x8d, 0x38, 0x5e, 0xd8, 0xcc, 0x22, 0x2e, 0x22, 0x8c, 0xb6, 0x02, 0x8b, 0x30, 0x73, + 0xbe, 0xd7, 0x2d, 0x17, 0x36, 0x22, 0x08, 0x14, 0xc7, 0xd3, 0x7f, 0xd2, 0x40, 0x41, 0x1d, 0xf8, + 0x02, 0x88, 0x67, 0x33, 0x49, 0x3c, 0xa5, 0xb3, 0xab, 0x34, 0x82, 0x76, 0xbe, 0x18, 0x64, 0x2c, + 0x39, 0x67, 0x1f, 0x4c, 0xd9, 0xb2, 0x54, 0xac, 0xa8, 0x49, 0xd4, 0xab, 0x67, 0xa3, 0x2a, 0x4a, + 0x9b, 0x57, 0xd8, 0x53, 0xe1, 0x37, 0x43, 0x7d, 0x14, 0xfd, 0xdb, 0x49, 0x30, 0xdb, 0x9f, 0xe6, + 0x26, 0x66, 0xec, 0x02, 0x9a, 0xf7, 0x7d, 0x50, 0xf0, 0x03, 0xda, 0x76, 0x98, 0x43, 0x3d, 0x12, + 0xa8, 0x3e, 0x5a, 0x52, 0x2e, 0x85, 0x83, 0x48, 0x85, 0xe2, 0x76, 0xb0, 0x0e, 0x80, 0x8f, 0x03, + 0xec, 0x12, 0x2e, 0x4e, 0x9f, 0x93, 0xa7, 0x7f, 0x6f, 0xc4, 0xe9, 0xe3, 0x27, 0x32, 0x0e, 0x06, + 0x5e, 0x55, 0x8f, 0x07, 0x9d, 0x28, 0xbb, 0x48, 0x81, 0x62, 0xd0, 0xb0, 0x01, 0xe6, 0x02, 0x62, + 0x35, 0xb1, 0xe3, 0x2a, 0xee, 0xca, 0xcb, 0x0c, 0xab, 0x82, 0x48, 0x50, 0x5c, 0x71, 0xda, 0x2d, + 0xdf, 0x18, 0x7e, 0x47, 0x18, 0x07, 0x24, 0x60, 0x0e, 0xe3, 0xc4, 0xe3, 0x61, 0x87, 0x26, 0x7c, + 0x50, 0x12, 0x5b, 0xcc, 0x89, 0x2b, 0x58, 0x7d, 0xdf, 0xe7, 0x0e, 0xf5, 0x58, 0x71, 0x22, 0x9a, + 0x93, 0xdd, 0x98, 0x1c, 0x25, 0xac, 0xe0, 0x0e, 0x58, 0x16, 0x7d, 0xfd, 0x65, 0x18, 0xa0, 0xfa, + 0xd4, 0xc7, 0x9e, 0xb8, 0xa5, 0xe2, 0xa4, 0x64, 0xad, 0xa2, 0x58, 0x01, 0x1b, 0x19, 0x7a, 0x94, + 0xe9, 0x05, 0x3f, 0x01, 0x8b, 0xe1, 0x0e, 0x30, 0x1d, 0xcf, 0x76, 0xbc, 0xba, 0xd8, 0x00, 0x92, + 0x00, 0x67, 0xcc, 0x6b, 0x82, 0x00, 0x1f, 0xa6, 0x95, 0xa7, 0x59, 0x42, 0x34, 0x0c, 0x02, 0x9f, + 0x80, 0x45, 0x19, 0x91, 0xd8, 0x6a, 0xe8, 0x1d, 0xc2, 0x8a, 0xd3, 0xb2, 0x74, 0x6b, 0xf1, 0xd2, + 0x89, 0xab, 0x0b, 0xd9, 0x3b, 0x24, 0x83, 0x1a, 0x69, 0x12, 0x8b, 0xd3, 0xe0, 0x90, 0x04, 0xae, + 0xf9, 0x86, 0xaa, 0xd7, 0xe2, 0x46, 0x1a, 0x0a, 0x0d, 0xa3, 0xaf, 0xdc, 0x03, 0xf3, 0xa9, 0x82, + 0xc3, 0x05, 0x90, 0x6b, 0x90, 0x4e, 0x48, 0x6a, 0x48, 0xfc, 0x84, 0xcb, 0x60, 0xa2, 0x8d, 0x9b, + 0x2d, 0x12, 0x36, 0x1f, 0x0a, 0x3f, 0xee, 0x8e, 0xdf, 0xd1, 0xf4, 0xdf, 0x34, 0xb0, 0x10, 0xef, + 0x9e, 0x0b, 0xe0, 0x89, 0x07, 0x49, 0x9e, 0xb8, 0x72, 0x8e, 0x9e, 0x1e, 0x41, 0x16, 0x5f, 0x6b, + 0x60, 0x36, 0xbe, 0xea, 0xe0, 0xbb, 0x60, 0x1a, 0xb7, 0x6c, 0x87, 0x78, 0x56, 0x9f, 0xd3, 0x07, + 0x89, 0x6c, 0x28, 0x39, 0x1a, 0x58, 0x88, 0x45, 0x48, 0x9e, 0xfa, 0x4e, 0x80, 0x45, 0x93, 0xd5, + 0x88, 0x45, 0x3d, 0x9b, 0xc9, 0x1b, 0xca, 0x85, 0x8b, 0xb0, 0x9a, 0x56, 0xa2, 0x61, 0x7b, 0xfd, + 0x87, 0x71, 0xb0, 0x10, 0xf6, 0x46, 0xf8, 0x04, 0x72, 0x89, 0xc7, 0x2f, 0x80, 0x54, 0x76, 0x13, + 0x1b, 0xf1, 0x9d, 0x33, 0x57, 0x46, 0x94, 0xd8, 0xa8, 0xd5, 0x08, 0x3f, 0x06, 0x93, 0x8c, 0x63, + 0xde, 0x12, 0x44, 0x23, 0x00, 0xaf, 0x9f, 0x17, 0x50, 0x3a, 0x45, 0x5b, 0x31, 0xfc, 0x46, 0x0a, + 0x4c, 0xff, 0x5d, 0x03, 0xcb, 0x69, 0x97, 0x0b, 0xe8, 0xb0, 0x9d, 0x64, 0x87, 0xbd, 0x75, 0xce, + 0xc3, 0x8c, 0xe8, 0xb2, 0x3f, 0x35, 0xf0, 0xbf, 0xa1, 0x73, 0xcb, 0xfd, 0x2b, 0x78, 0xc9, 0x4f, + 0xb1, 0xdf, 0x5e, 0xf4, 0x9e, 
0x90, 0xbc, 0x74, 0x90, 0xa1, 0x47, 0x99, 0x5e, 0xf0, 0x31, 0x58, + 0x70, 0xbc, 0xa6, 0xe3, 0x91, 0x50, 0x56, 0x8b, 0xea, 0x9b, 0x49, 0x1e, 0x69, 0x64, 0x59, 0xdc, + 0xe5, 0x5e, 0xb7, 0xbc, 0xb0, 0x9d, 0x42, 0x41, 0x43, 0xb8, 0xfa, 0x1f, 0x19, 0x95, 0x91, 0x1b, + 0x57, 0x8c, 0x90, 0x94, 0x90, 0x60, 0x68, 0x84, 0x94, 0x1c, 0x0d, 0x2c, 0x64, 0xdf, 0xc8, 0xab, + 0x50, 0x89, 0x9e, 0xbb, 0x6f, 0xa4, 0x53, 0xac, 0x6f, 0xe4, 0x37, 0x52, 0x60, 0x22, 0x09, 0xf1, + 0xae, 0x92, 0x77, 0x99, 0x4b, 0x26, 0xb1, 0xa7, 0xe4, 0x68, 0x60, 0xa1, 0xff, 0x93, 0xcb, 0x28, + 0x90, 0x6c, 0xc0, 0xd8, 0x69, 0xfa, 0xff, 0x56, 0xd2, 0xa7, 0xb1, 0x07, 0xa7, 0xb1, 0xe1, 0xf7, + 0x1a, 0x80, 0x78, 0x00, 0xb1, 0xdb, 0x6f, 0xd0, 0xb0, 0x8b, 0xaa, 0xaf, 0x35, 0x12, 0xc6, 0xc6, + 0x10, 0x4e, 0xb8, 0x8d, 0x57, 0x54, 0x7c, 0x38, 0x6c, 0x80, 0x32, 0x82, 0x43, 0x1b, 0x14, 0x42, + 0x69, 0x35, 0x08, 0x68, 0xa0, 0xc6, 0x53, 0x3f, 0x33, 0x17, 0x69, 0x69, 0x96, 0xe4, 0xd3, 0x30, + 0x72, 0x3d, 0xed, 0x96, 0x0b, 0x31, 0x3d, 0x8a, 0xc3, 0x8a, 0x28, 0x36, 0x89, 0xa2, 0xe4, 0x5f, + 0x2f, 0xca, 0x16, 0x19, 0x1d, 0x25, 0x06, 0xbb, 0x52, 0x05, 0xff, 0x1f, 0x71, 0x2d, 0xaf, 0xb5, + 0xb3, 0xbe, 0xd1, 0x40, 0x3c, 0x06, 0xdc, 0x01, 0x79, 0xee, 0xa8, 0xa9, 0x4b, 0x3e, 0x9f, 0xcf, + 0x20, 0x92, 0x43, 0xc7, 0x25, 0x11, 0x15, 0x8a, 0x2f, 0x24, 0x51, 0xe0, 0xdb, 0x60, 0xca, 0x25, + 0x8c, 0xe1, 0xba, 0x8a, 0x1c, 0x3d, 0x26, 0x77, 0x43, 0x31, 0xea, 0xeb, 0xf5, 0xdb, 0x60, 0x29, + 0xe3, 0x51, 0x0e, 0xcb, 0x60, 0xc2, 0x92, 0xff, 0x71, 0x45, 0x42, 0x13, 0xe6, 0x8c, 0x60, 0x94, + 0x4d, 0xf9, 0xd7, 0x36, 0x94, 0x9b, 0x6b, 0xcf, 0x5e, 0x96, 0xc6, 0x9e, 0xbf, 0x2c, 0x8d, 0xbd, + 0x78, 0x59, 0x1a, 0xfb, 0xaa, 0x57, 0xd2, 0x9e, 0xf5, 0x4a, 0xda, 0xf3, 0x5e, 0x49, 0x7b, 0xd1, + 0x2b, 0x69, 0x7f, 0xf5, 0x4a, 0xda, 0x77, 0x7f, 0x97, 0xc6, 0x3e, 0x1d, 0x6f, 0xaf, 0xff, 0x17, + 0x00, 0x00, 0xff, 0xff, 0x02, 0xb2, 0x6f, 0xe2, 0x3e, 0x12, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -721,6 +756,30 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RequiresRepublish != nil { + i-- + if *m.RequiresRepublish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TokenRequests) > 0 { + for iNdEx := len(m.TokenRequests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.FSGroupPolicy != nil { i -= len(*m.FSGroupPolicy) copy(dAtA[i:], *m.FSGroupPolicy) @@ -1107,6 +1166,39 @@ func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeAttachment) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1503,6 +1595,15 @@ func (m *CSIDriverSpec) Size() (n int) { l = len(*m.FSGroupPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.TokenRequests) > 0 { + for _, e := range m.TokenRequests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.RequiresRepublish != nil { + n += 2 + } return n } @@ -1635,6 +1736,20 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + func (m *VolumeAttachment) Size() (n int) { if m == nil { return 0 @@ -1787,12 +1902,19 @@ func (this *CSIDriverSpec) String() string { if this == nil { return "nil" } + repeatedStringForTokenRequests := "[]TokenRequest{" + for _, f := range this.TokenRequests { + repeatedStringForTokenRequests += strings.Replace(strings.Replace(f.String(), "TokenRequest", "TokenRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForTokenRequests += "}" s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `StorageCapacity:` + valueToStringGenerated(this.StorageCapacity) + `,`, `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, + `TokenRequests:` + repeatedStringForTokenRequests + `,`, + `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, `}`, }, "") return s @@ -1900,6 +2022,17 @@ func (this *StorageClassList) String() string { }, "") return s } +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *VolumeAttachment) String() string { if this == nil { return "nil" @@ -2104,10 +2237,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2224,10 +2354,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2399,16 +2526,68 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { s := FSGroupPolicy(dAtA[iNdEx:postIndex]) m.FSGroupPolicy = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenRequests = 
append(m.TokenRequests, TokenRequest{}) + if err := m.TokenRequests[len(m.TokenRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresRepublish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RequiresRepublish = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2524,10 +2703,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2709,10 +2885,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2829,10 +3002,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2916,10 +3086,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3144,7 +3311,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3314,10 +3481,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3434,10 +3598,109 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3586,10 +3849,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3706,10 +3966,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3828,10 +4085,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3978,10 +4232,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4161,7 +4412,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -4250,10 +4501,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4368,10 +4616,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4441,10 +4686,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { if err != 
nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index a3526ca4e..d6b4d9cbd 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1; @@ -146,6 +146,43 @@ message CSIDriverSpec { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional optional string fsGroupPolicy = 5; + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + repeated TokenRequest tokenRequests = 6; + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + optional bool requiresRepublish = 7; } // CSINode holds information about all CSI drivers installed on a node. @@ -281,6 +318,19 @@ message StorageClassList { repeated StorageClass items = 2; } +// TokenRequest contains parameters of a service account token. +message TokenRequest { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + optional string audience = 1; + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". + // + // +optional + optional int64 expirationSeconds = 2; +} + // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. // diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 27e06debb..18d0fb877 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -344,6 +344,43 @@ type CSIDriverSpec struct { // that enable the CSIVolumeFSGroupPolicy feature gate. 
// +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -381,6 +418,20 @@ const ( // provided by a CSI driver. More modes may be added in the future. type VolumeLifecycleMode string +// TokenRequest contains parameters of a service account token. +type TokenRequest struct { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + // + Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". + // + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` +} + const ( // VolumeLifecyclePersistent explicitly confirms that the driver implements // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 606cda4db..0e28b1d2f 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -54,6 +54,8 @@ var map_CSIDriverSpec = map[string]string{ "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. 
A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.", "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false.", "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -127,6 +129,16 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_TokenRequest = map[string]string{ + "": "TokenRequest contains parameters of a service account token.", + "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 5eb0225a0..f4de94216 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -113,6 +113,18 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(FSGroupPolicy) **out = **in } + if in.TokenRequests != nil { + in, out := &in.TokenRequests, &out.TokenRequests + *out = make([]TokenRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequiresRepublish != nil { + in, out := &in.RequiresRepublish, &out.RequiresRepublish + *out = new(bool) + **out = **in + } return } @@ -328,6 +340,27 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 1b7767fdc..38d357399 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -1210,10 +1210,7 @@ func (m *CSIStorageCapacity) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1330,10 +1327,7 @@ func (m *CSIStorageCapacityList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1482,10 +1476,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1602,10 +1593,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1724,10 +1712,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1874,10 +1859,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2057,7 +2039,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -2146,10 +2128,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2264,10 +2243,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 40a764051..d64345333 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1alpha1; diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index cec77515e..328d72145 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -298,10 +298,38 @@ func (m *StorageClassList) XXX_DiscardUnknown() { var xxx_messageInfo_StorageClassList proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{9} + return fileDescriptor_7d2980599fd0de80, []int{10} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +357,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{10} + return fileDescriptor_7d2980599fd0de80, []int{11} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{11} + return fileDescriptor_7d2980599fd0de80, []int{12} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{12} + return fileDescriptor_7d2980599fd0de80, []int{13} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{13} + return fileDescriptor_7d2980599fd0de80, []int{14} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +469,7 @@ var xxx_messageInfo_VolumeAttachmentStatus 
proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{14} + return fileDescriptor_7d2980599fd0de80, []int{15} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +497,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{15} + return fileDescriptor_7d2980599fd0de80, []int{16} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -505,6 +533,7 @@ func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1beta1.TokenRequest") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") @@ -520,95 +549,102 @@ func init() { } var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1400 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x3d, 0x6f, 0xdb, 0x46, - 0x1f, 0x37, 0x2d, 0xc9, 0x2f, 0x27, 0x3b, 0x96, 0xcf, 0xc6, 0xf3, 0xe8, 0xd1, 0x20, 0x1a, 0x7a, - 0xd0, 0xc6, 0x09, 0x12, 0x2a, 0x31, 0xd2, 0x20, 0x08, 0x90, 0xc1, 0x72, 0xdc, 0x46, 0x89, 0xe5, - 0xb8, 0x27, 0x23, 0x28, 0x82, 0x0e, 0x3d, 0x91, 0x67, 0x99, 0xb1, 0xc8, 0x63, 0xc8, 0x93, 0x5a, - 0x6d, 0x9d, 0x3a, 0x17, 0x1d, 0xfa, 0x09, 0xfa, 0x15, 0x5a, 0xa0, 0x5d, 0x3a, 0x36, 0x53, 0x11, - 0x74, 0xca, 0x44, 0x34, 0xec, 0x47, 0x28, 0xba, 0x18, 0x1d, 0x8a, 0x3b, 0x9e, 0xc4, 0x17, 0x51, - 0xb1, 0xdd, 0xc1, 0x1b, 0xef, 0xff, 0xf2, 0xfb, 0xbf, 0xff, 0xef, 0x08, 0x76, 0x4e, 0xee, 0x79, - 0x9a, 0x49, 0xeb, 0x27, 0xfd, 0x0e, 0x71, 0x6d, 0xc2, 0x88, 0x57, 0x1f, 0x10, 0xdb, 0xa0, 0x6e, - 0x5d, 0x32, 0xb0, 0x63, 0xd6, 0x3d, 0x46, 0x5d, 0xdc, 0x25, 0xf5, 0xc1, 0xed, 0x0e, 0x61, 0xf8, - 0x76, 0xbd, 0x4b, 0x6c, 0xe2, 0x62, 0x46, 0x0c, 0xcd, 0x71, 0x29, 0xa3, 0xb0, 0x12, 0xca, 0x6a, - 0xd8, 0x31, 0x35, 0x29, 0xab, 0x49, 0xd9, 0xca, 0xcd, 0xae, 0xc9, 0x8e, 0xfb, 0x1d, 0x4d, 0xa7, - 0x56, 0xbd, 0x4b, 0xbb, 0xb4, 0x2e, 0x54, 0x3a, 0xfd, 0x23, 0x71, 0x12, 0x07, 0xf1, 0x15, 0x42, - 0x55, 0x6a, 0x31, 0xb3, 0x3a, 0x75, 0xb9, 0xcd, 0xb4, 0xb9, 0xca, 0x9d, 0x48, 0xc6, 0xc2, 0xfa, - 0xb1, 0x69, 0x13, 0x77, 0x58, 0x77, 0x4e, 0xba, 0x9c, 0xe0, 0xd5, 0x2d, 0xc2, 0x70, 0x96, 0x56, - 0x7d, 0x9a, 0x96, 0xdb, 0xb7, 0x99, 0x69, 0x91, 0x09, 0x85, 0xbb, 0x67, 0x29, 0x78, 0xfa, 0x31, - 0xb1, 0x70, 0x5a, 0xaf, 0xf6, 0x93, 0x02, 0x16, 0x77, 0xda, 0xcd, 0x87, 0xae, 0x39, 0x20, 0x2e, - 0xfc, 0x0c, 0x2c, 0x70, 0x8f, 0x0c, 0xcc, 0x70, 0x59, 0xd9, 0x50, 0x36, 0x8b, 0x5b, 0xb7, 0xb4, - 0x28, 0x5d, 0x63, 0x60, 0xcd, 0x39, 0xe9, 0x72, 0x82, 0xa7, 0x71, 0x69, 0x6d, 0x70, 0x5b, 0x7b, - 0xda, 0x79, 0x41, 0x74, 0xd6, 0x22, 0x0c, 0x37, 0xe0, 0x2b, 0x5f, 0x9d, 0x09, 0x7c, 0x15, 0x44, - 0x34, 0x34, 0x46, 0x85, 0x4f, 0x40, 0xde, 0x73, 0x88, 0x5e, 0x9e, 
0x15, 0xe8, 0xd7, 0xb4, 0xe9, - 0xc5, 0xd0, 0xc6, 0x6e, 0xb5, 0x1d, 0xa2, 0x37, 0x96, 0x24, 0x6c, 0x9e, 0x9f, 0x90, 0x00, 0xa9, - 0xfd, 0xa8, 0x80, 0xe5, 0xb1, 0xd4, 0x9e, 0xe9, 0x31, 0xf8, 0xe9, 0x44, 0x00, 0xda, 0xf9, 0x02, - 0xe0, 0xda, 0xc2, 0xfd, 0x92, 0xb4, 0xb3, 0x30, 0xa2, 0xc4, 0x9c, 0x7f, 0x0c, 0x0a, 0x26, 0x23, - 0x96, 0x57, 0x9e, 0xdd, 0xc8, 0x6d, 0x16, 0xb7, 0xde, 0x3b, 0x97, 0xf7, 0x8d, 0x65, 0x89, 0x58, - 0x68, 0x72, 0x5d, 0x14, 0x42, 0xd4, 0xfe, 0x9a, 0x8d, 0xf9, 0xce, 0x63, 0x82, 0xf7, 0xc1, 0x15, - 0xcc, 0x18, 0xd6, 0x8f, 0x11, 0x79, 0xd9, 0x37, 0x5d, 0x62, 0x88, 0x08, 0x16, 0x1a, 0x30, 0xf0, - 0xd5, 0x2b, 0xdb, 0x09, 0x0e, 0x4a, 0x49, 0x72, 0x5d, 0x87, 0x1a, 0x4d, 0xfb, 0x88, 0x3e, 0xb5, - 0x5b, 0xb4, 0x6f, 0x33, 0x91, 0x60, 0xa9, 0x7b, 0x90, 0xe0, 0xa0, 0x94, 0x24, 0xd4, 0xc1, 0xfa, - 0x80, 0xf6, 0xfa, 0x16, 0xd9, 0x33, 0x8f, 0x88, 0x3e, 0xd4, 0x7b, 0xa4, 0x45, 0x0d, 0xe2, 0x95, - 0x73, 0x1b, 0xb9, 0xcd, 0xc5, 0x46, 0x3d, 0xf0, 0xd5, 0xf5, 0x67, 0x19, 0xfc, 0x53, 0x5f, 0x5d, - 0xcb, 0xa0, 0xa3, 0x4c, 0x30, 0xf8, 0x00, 0xac, 0xc8, 0x0c, 0xed, 0x60, 0x07, 0xeb, 0x26, 0x1b, - 0x96, 0xf3, 0xc2, 0xc3, 0xb5, 0xc0, 0x57, 0x57, 0xda, 0x49, 0x16, 0x4a, 0xcb, 0xc2, 0x47, 0x60, - 0xf9, 0xc8, 0xfb, 0xc8, 0xa5, 0x7d, 0xe7, 0x80, 0xf6, 0x4c, 0x7d, 0x58, 0x2e, 0x6c, 0x28, 0x9b, - 0x8b, 0x8d, 0x5a, 0xe0, 0xab, 0xcb, 0x1f, 0xb6, 0x63, 0x8c, 0xd3, 0x34, 0x01, 0x25, 0x15, 0x6b, - 0x3f, 0x28, 0x60, 0x7e, 0xa7, 0xdd, 0xdc, 0xa7, 0x06, 0xb9, 0x84, 0x76, 0x6f, 0x26, 0xda, 0xfd, - 0xea, 0x19, 0x0d, 0xc3, 0x9d, 0x9a, 0xda, 0xec, 0x7f, 0x86, 0xcd, 0xce, 0x65, 0xe4, 0xb4, 0x6e, - 0x80, 0xbc, 0x8d, 0x2d, 0x22, 0x5c, 0x5f, 0x8c, 0x74, 0xf6, 0xb1, 0x45, 0x90, 0xe0, 0xc0, 0xf7, - 0xc1, 0x9c, 0x4d, 0x0d, 0xd2, 0x7c, 0x28, 0x1c, 0x58, 0x6c, 0x5c, 0x91, 0x32, 0x73, 0xfb, 0x82, - 0x8a, 0x24, 0x17, 0xde, 0x01, 0x4b, 0x8c, 0x3a, 0xb4, 0x47, 0xbb, 0xc3, 0x27, 0x64, 0x38, 0x2a, - 0x7d, 0x29, 0xf0, 0xd5, 0xa5, 0xc3, 0x18, 0x1d, 0x25, 0xa4, 0x60, 0x07, 0x14, 0x71, 0xaf, 0x47, - 0x75, 0xcc, 0x70, 0xa7, 0x47, 0x44, 0x3d, 0x8b, 0x5b, 0xf5, 0x77, 0xc5, 0x18, 0xf6, 0x0b, 0x37, - 0x8e, 0x88, 0x47, 0xfb, 0xae, 0x4e, 0xbc, 0xc6, 0x4a, 0xe0, 0xab, 0xc5, 0xed, 0x08, 0x07, 0xc5, - 0x41, 0x6b, 0xdf, 0x2b, 0xa0, 0x28, 0xa3, 0xbe, 0x84, 0x01, 0x7f, 0x94, 0x1c, 0xf0, 0xff, 0x9f, - 0xa3, 0x5e, 0x53, 0xc6, 0x5b, 0x1f, 0xbb, 0x2d, 0x66, 0xfb, 0x10, 0xcc, 0x1b, 0xa2, 0x68, 0x5e, - 0x59, 0x11, 0xd0, 0xd7, 0xce, 0x01, 0x2d, 0xf7, 0xc7, 0x8a, 0x34, 0x30, 0x1f, 0x9e, 0x3d, 0x34, - 0x82, 0xaa, 0x7d, 0x33, 0x07, 0x96, 0x46, 0xa3, 0xd3, 0xc3, 0x9e, 0x77, 0x09, 0x0d, 0xfd, 0x01, - 0x28, 0x3a, 0x2e, 0x1d, 0x98, 0x9e, 0x49, 0x6d, 0xe2, 0xca, 0xb6, 0x5a, 0x93, 0x2a, 0xc5, 0x83, - 0x88, 0x85, 0xe2, 0x72, 0xb0, 0x07, 0x80, 0x83, 0x5d, 0x6c, 0x11, 0xc6, 0x53, 0x90, 0x13, 0x29, - 0xb8, 0xf7, 0xae, 0x14, 0xc4, 0xc3, 0xd2, 0x0e, 0xc6, 0xaa, 0xbb, 0x36, 0x73, 0x87, 0x91, 0x8b, - 0x11, 0x03, 0xc5, 0xf0, 0xe1, 0x09, 0x58, 0x76, 0x89, 0xde, 0xc3, 0xa6, 0x25, 0xb7, 0x45, 0x5e, - 0xb8, 0xb9, 0xcb, 0xb7, 0x05, 0x8a, 0x33, 0x4e, 0x7d, 0xf5, 0xd6, 0xe4, 0x1d, 0xae, 0x1d, 0x10, - 0xd7, 0x33, 0x3d, 0x46, 0x6c, 0x16, 0x36, 0x6c, 0x42, 0x07, 0x25, 0xb1, 0xf9, 0xec, 0x58, 0x7c, - 0x8f, 0x3e, 0x75, 0x98, 0x49, 0x6d, 0xaf, 0x5c, 0x88, 0x66, 0xa7, 0x15, 0xa3, 0xa3, 0x84, 0x14, - 0xdc, 0x03, 0xeb, 0xbc, 0xcd, 0x3f, 0x0f, 0x0d, 0xec, 0x7e, 0xe1, 0x60, 0x9b, 0xa7, 0xaa, 0x3c, - 0x27, 0x96, 0x62, 0x99, 0x2f, 0xdd, 0xed, 0x0c, 0x3e, 0xca, 0xd4, 0x82, 0x9f, 0x80, 0xd5, 0x70, - 0xeb, 0x36, 0x4c, 0xdb, 0x30, 0xed, 0x2e, 0xdf, 0xb9, 0xe5, 0x79, 0x11, 0xf4, 0xf5, 0xc0, 
0x57, - 0x57, 0x9f, 0xa5, 0x99, 0xa7, 0x59, 0x44, 0x34, 0x09, 0x02, 0x5f, 0x82, 0x55, 0x61, 0x91, 0x18, - 0x72, 0x11, 0x98, 0xc4, 0x2b, 0x2f, 0x88, 0xfa, 0x6d, 0xc6, 0xeb, 0xc7, 0x53, 0xc7, 0x1b, 0x69, - 0xb4, 0x2e, 0xda, 0xa4, 0x47, 0x74, 0x46, 0xdd, 0x43, 0xe2, 0x5a, 0x8d, 0xff, 0xc9, 0x7a, 0xad, - 0x6e, 0xa7, 0xa1, 0xd0, 0x24, 0x7a, 0xe5, 0x01, 0x58, 0x49, 0x15, 0x1c, 0x96, 0x40, 0xee, 0x84, - 0x0c, 0xc3, 0x45, 0x87, 0xf8, 0x27, 0x5c, 0x07, 0x85, 0x01, 0xee, 0xf5, 0x49, 0xd8, 0x81, 0x28, - 0x3c, 0xdc, 0x9f, 0xbd, 0xa7, 0xd4, 0x7e, 0x56, 0x40, 0x29, 0xde, 0x3d, 0x97, 0xb0, 0x36, 0x5a, - 0xc9, 0xb5, 0xb1, 0x79, 0xde, 0xc6, 0x9e, 0xb2, 0x3b, 0xbe, 0x9b, 0x05, 0xa5, 0xb0, 0x38, 0xe1, - 0xad, 0x6f, 0x11, 0x9b, 0x5d, 0xc2, 0x68, 0xa3, 0xc4, 0x5d, 0x75, 0xeb, 0xec, 0x3d, 0x1e, 0x79, - 0x37, 0xed, 0xd2, 0x82, 0xcf, 0xc1, 0x9c, 0xc7, 0x30, 0xeb, 0xf3, 0x99, 0xe7, 0xa8, 0x5b, 0x17, - 0x42, 0x15, 0x9a, 0xd1, 0xa5, 0x15, 0x9e, 0x91, 0x44, 0xac, 0xfd, 0xa2, 0x80, 0xf5, 0xb4, 0xca, - 0x25, 0x14, 0xfb, 0xe3, 0x64, 0xb1, 0x6f, 0x5c, 0x24, 0xa2, 0x29, 0x05, 0xff, 0x4d, 0x01, 0xff, - 0x99, 0x08, 0x5e, 0x5c, 0x8f, 0x7c, 0x4f, 0x38, 0xa9, 0x6d, 0xb4, 0x1f, 0xdd, 0xf9, 0x62, 0x4f, - 0x1c, 0x64, 0xf0, 0x51, 0xa6, 0x16, 0x7c, 0x01, 0x4a, 0xa6, 0xdd, 0x33, 0x6d, 0x12, 0xd2, 0xda, - 0x51, 0xb9, 0x33, 0x87, 0x39, 0x8d, 0x2c, 0xca, 0xbc, 0x1e, 0xf8, 0x6a, 0xa9, 0x99, 0x42, 0x41, - 0x13, 0xb8, 0xb5, 0x5f, 0x33, 0xca, 0x23, 0xee, 0xc2, 0x1b, 0x60, 0x21, 0x7c, 0xbd, 0x12, 0x57, - 0x86, 0x31, 0x4e, 0xf7, 0xb6, 0xa4, 0xa3, 0xb1, 0x84, 0xe8, 0x20, 0x91, 0x0a, 0xe9, 0xe8, 0xc5, - 0x3a, 0x48, 0x68, 0xc6, 0x3a, 0x48, 0x9c, 0x91, 0x44, 0xe4, 0x9e, 0xf0, 0x07, 0x90, 0x48, 0x68, - 0x2e, 0xe9, 0xc9, 0xbe, 0xa4, 0xa3, 0xb1, 0x44, 0xed, 0xef, 0x5c, 0x46, 0x95, 0x44, 0x2b, 0xc6, - 0x42, 0x1a, 0x3d, 0xda, 0xd3, 0x21, 0x19, 0xe3, 0x90, 0x0c, 0xf8, 0xad, 0x02, 0x20, 0x1e, 0x43, - 0xb4, 0x46, 0xad, 0x1a, 0xf6, 0xd3, 0xe3, 0x8b, 0x4f, 0x88, 0xb6, 0x3d, 0x01, 0x16, 0xde, 0x93, - 0x15, 0xe9, 0x04, 0x9c, 0x14, 0x40, 0x19, 0x1e, 0x40, 0x13, 0x14, 0x43, 0xea, 0xae, 0xeb, 0x52, - 0x57, 0x8e, 0xec, 0xd5, 0xb3, 0x1d, 0x12, 0xe2, 0x8d, 0xaa, 0x78, 0xc8, 0x45, 0xfa, 0xa7, 0xbe, - 0x5a, 0x8c, 0xf1, 0x51, 0x1c, 0x9b, 0x9b, 0x32, 0x48, 0x64, 0x2a, 0xff, 0x2f, 0x4c, 0x3d, 0x24, - 0xd3, 0x4d, 0xc5, 0xb0, 0x2b, 0xbb, 0xe0, 0xbf, 0x53, 0x12, 0x74, 0xa1, 0x7b, 0xe5, 0x2b, 0x05, - 0xc4, 0x6d, 0xc0, 0x3d, 0x90, 0xe7, 0x3f, 0xd6, 0x72, 0xc3, 0x5c, 0x3f, 0xdf, 0x86, 0x39, 0x34, - 0x2d, 0x12, 0x2d, 0x4a, 0x7e, 0x42, 0x02, 0x05, 0x5e, 0x03, 0xf3, 0x16, 0xf1, 0x3c, 0xdc, 0x95, - 0x96, 0xa3, 0x57, 0x5f, 0x2b, 0x24, 0xa3, 0x11, 0xbf, 0x76, 0x17, 0xac, 0x65, 0xbc, 0xa3, 0xa1, - 0x0a, 0x0a, 0xba, 0xf8, 0xf3, 0xe3, 0x0e, 0x15, 0x1a, 0x8b, 0x7c, 0xcb, 0xec, 0x88, 0x1f, 0xbe, - 0x90, 0xde, 0xb8, 0xf9, 0xea, 0x6d, 0x75, 0xe6, 0xf5, 0xdb, 0xea, 0xcc, 0x9b, 0xb7, 0xd5, 0x99, - 0x2f, 0x83, 0xaa, 0xf2, 0x2a, 0xa8, 0x2a, 0xaf, 0x83, 0xaa, 0xf2, 0x26, 0xa8, 0x2a, 0xbf, 0x07, - 0x55, 0xe5, 0xeb, 0x3f, 0xaa, 0x33, 0xcf, 0xe7, 0x65, 0xbe, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, - 0x4b, 0x3f, 0x49, 0x6e, 0x6d, 0x11, 0x00, 0x00, + // 1508 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0x1b, 0x47, + 0x16, 0xd7, 0x8a, 0xd4, 0xd7, 0x50, 0xb2, 0xa4, 0x91, 0x7c, 0xc7, 0x53, 0x41, 0x0a, 0x3c, 0xdc, + 0x59, 0x36, 0xec, 0xa5, 0x2d, 0xf8, 0x0c, 0xc3, 0x80, 0x0b, 0xad, 0xac, 0x3b, 0xcb, 0x96, 0x64, + 0xdd, 0x50, 0x30, 0x0e, 0xc6, 0x15, 0x19, 0xee, 0x3e, 0x51, 0x6b, 0x71, 0x77, 0xd6, 0x3b, 0x43, + 0xc5, 0xec, 0x92, 
0x26, 0x75, 0x90, 0x22, 0x7d, 0x80, 0xfc, 0x0b, 0x09, 0x90, 0x34, 0x29, 0xe3, + 0x2a, 0x30, 0x52, 0xb9, 0x22, 0x62, 0xe6, 0x4f, 0x48, 0x27, 0xa4, 0x08, 0x66, 0x76, 0xc8, 0xfd, + 0x20, 0x69, 0x49, 0x29, 0xd4, 0x71, 0xde, 0xc7, 0xef, 0xbd, 0x79, 0xef, 0xcd, 0xef, 0x2d, 0xd1, + 0xe6, 0xf1, 0x7d, 0x6e, 0xba, 0xac, 0x7a, 0xdc, 0xaa, 0x43, 0xe8, 0x83, 0x00, 0x5e, 0x3d, 0x01, + 0xdf, 0x61, 0x61, 0x55, 0x2b, 0x68, 0xe0, 0x56, 0xb9, 0x60, 0x21, 0x6d, 0x40, 0xf5, 0xe4, 0x4e, + 0x1d, 0x04, 0xbd, 0x53, 0x6d, 0x80, 0x0f, 0x21, 0x15, 0xe0, 0x98, 0x41, 0xc8, 0x04, 0xc3, 0x2b, + 0x91, 0xad, 0x49, 0x03, 0xd7, 0xd4, 0xb6, 0xa6, 0xb6, 0x5d, 0xb9, 0xd5, 0x70, 0xc5, 0x51, 0xab, + 0x6e, 0xda, 0xcc, 0xab, 0x36, 0x58, 0x83, 0x55, 0x95, 0x4b, 0xbd, 0x75, 0xa8, 0x4e, 0xea, 0xa0, + 0x7e, 0x45, 0x50, 0x2b, 0x95, 0x44, 0x58, 0x9b, 0x85, 0x32, 0x66, 0x36, 0xdc, 0xca, 0xdd, 0xd8, + 0xc6, 0xa3, 0xf6, 0x91, 0xeb, 0x43, 0xd8, 0xae, 0x06, 0xc7, 0x0d, 0x29, 0xe0, 0x55, 0x0f, 0x04, + 0x1d, 0xe6, 0x55, 0x1d, 0xe5, 0x15, 0xb6, 0x7c, 0xe1, 0x7a, 0x30, 0xe0, 0x70, 0xef, 0x2c, 0x07, + 0x6e, 0x1f, 0x81, 0x47, 0xb3, 0x7e, 0x95, 0xef, 0x0d, 0x34, 0xb3, 0x59, 0xdb, 0x7e, 0x14, 0xba, + 0x27, 0x10, 0xe2, 0x8f, 0xd0, 0xb4, 0xcc, 0xc8, 0xa1, 0x82, 0x16, 0x8d, 0x55, 0x63, 0xad, 0xb0, + 0x7e, 0xdb, 0x8c, 0xcb, 0xd5, 0x07, 0x36, 0x83, 0xe3, 0x86, 0x14, 0x70, 0x53, 0x5a, 0x9b, 0x27, + 0x77, 0xcc, 0x67, 0xf5, 0x97, 0x60, 0x8b, 0x5d, 0x10, 0xd4, 0xc2, 0x6f, 0x3a, 0xe5, 0xb1, 0x6e, + 0xa7, 0x8c, 0x62, 0x19, 0xe9, 0xa3, 0xe2, 0xa7, 0x28, 0xcf, 0x03, 0xb0, 0x8b, 0xe3, 0x0a, 0xfd, + 0xba, 0x39, 0xba, 0x19, 0x66, 0x3f, 0xad, 0x5a, 0x00, 0xb6, 0x35, 0xab, 0x61, 0xf3, 0xf2, 0x44, + 0x14, 0x48, 0xe5, 0x3b, 0x03, 0xcd, 0xf5, 0xad, 0x76, 0x5c, 0x2e, 0xf0, 0xff, 0x07, 0x2e, 0x60, + 0x9e, 0xef, 0x02, 0xd2, 0x5b, 0xa5, 0xbf, 0xa0, 0xe3, 0x4c, 0xf7, 0x24, 0x89, 0xe4, 0x9f, 0xa0, + 0x09, 0x57, 0x80, 0xc7, 0x8b, 0xe3, 0xab, 0xb9, 0xb5, 0xc2, 0xfa, 0x3f, 0xce, 0x95, 0xbd, 0x35, + 0xa7, 0x11, 0x27, 0xb6, 0xa5, 0x2f, 0x89, 0x20, 0x2a, 0x5f, 0xe5, 0x13, 0xb9, 0xcb, 0x3b, 0xe1, + 0x07, 0xe8, 0x0a, 0x15, 0x82, 0xda, 0x47, 0x04, 0x5e, 0xb5, 0xdc, 0x10, 0x1c, 0x75, 0x83, 0x69, + 0x0b, 0x77, 0x3b, 0xe5, 0x2b, 0x1b, 0x29, 0x0d, 0xc9, 0x58, 0x4a, 0xdf, 0x80, 0x39, 0xdb, 0xfe, + 0x21, 0x7b, 0xe6, 0xef, 0xb2, 0x96, 0x2f, 0x54, 0x81, 0xb5, 0xef, 0x7e, 0x4a, 0x43, 0x32, 0x96, + 0xd8, 0x46, 0xcb, 0x27, 0xac, 0xd9, 0xf2, 0x60, 0xc7, 0x3d, 0x04, 0xbb, 0x6d, 0x37, 0x61, 0x97, + 0x39, 0xc0, 0x8b, 0xb9, 0xd5, 0xdc, 0xda, 0x8c, 0x55, 0xed, 0x76, 0xca, 0xcb, 0xcf, 0x87, 0xe8, + 0x4f, 0x3b, 0xe5, 0xa5, 0x21, 0x72, 0x32, 0x14, 0x0c, 0x3f, 0x44, 0xf3, 0xba, 0x42, 0x9b, 0x34, + 0xa0, 0xb6, 0x2b, 0xda, 0xc5, 0xbc, 0xca, 0x70, 0xa9, 0xdb, 0x29, 0xcf, 0xd7, 0xd2, 0x2a, 0x92, + 0xb5, 0xc5, 0x8f, 0xd1, 0xdc, 0x21, 0xff, 0x4f, 0xc8, 0x5a, 0xc1, 0x3e, 0x6b, 0xba, 0x76, 0xbb, + 0x38, 0xb1, 0x6a, 0xac, 0xcd, 0x58, 0x95, 0x6e, 0xa7, 0x3c, 0xf7, 0xef, 0x5a, 0x42, 0x71, 0x9a, + 0x15, 0x90, 0xb4, 0x23, 0x06, 0x34, 0x27, 0xd8, 0x31, 0xf8, 0xb2, 0x74, 0xc0, 0x05, 0x2f, 0x4e, + 0xaa, 0x5e, 0xae, 0x7d, 0xa8, 0x97, 0x07, 0x09, 0x07, 0xeb, 0xaa, 0x6e, 0xe7, 0x5c, 0x52, 0xca, + 0x49, 0x1a, 0x15, 0x6f, 0xa2, 0xc5, 0x30, 0x6a, 0x0e, 0x27, 0x10, 0xb4, 0xea, 0x4d, 0x97, 0x1f, + 0x15, 0xa7, 0xd4, 0x8d, 0xaf, 0x76, 0x3b, 0xe5, 0x45, 0x92, 0x55, 0x92, 0x41, 0xfb, 0xca, 0xb7, + 0x06, 0x9a, 0xda, 0xac, 0x6d, 0xef, 0x31, 0x07, 0x2e, 0xe1, 0x69, 0x6e, 0xa7, 0x9e, 0xe6, 0xb5, + 0x33, 0x86, 0x5b, 0x26, 0x35, 0xf2, 0x61, 0xfe, 0x16, 0x3d, 0x4c, 0x69, 0xa3, 0x99, 0x65, 0x15, + 0xe5, 0x7d, 0xea, 0x81, 0x4a, 0x7d, 0x26, 
0xf6, 0xd9, 0xa3, 0x1e, 0x10, 0xa5, 0xc1, 0xff, 0x44, + 0x93, 0x3e, 0x73, 0x60, 0xfb, 0x91, 0x4a, 0x60, 0xc6, 0xba, 0xa2, 0x6d, 0x26, 0xf7, 0x94, 0x94, + 0x68, 0x2d, 0xbe, 0x8b, 0x66, 0x05, 0x0b, 0x58, 0x93, 0x35, 0xda, 0x4f, 0xa1, 0xdd, 0x1b, 0xd3, + 0x85, 0x6e, 0xa7, 0x3c, 0x7b, 0x90, 0x90, 0x93, 0x94, 0x15, 0xae, 0xa3, 0x02, 0x6d, 0x36, 0x99, + 0x4d, 0x05, 0xad, 0x37, 0x41, 0xcd, 0x5e, 0x61, 0xbd, 0xfa, 0xa1, 0x3b, 0x46, 0xb3, 0x2d, 0x83, + 0x13, 0xe0, 0xac, 0x15, 0xda, 0xc0, 0xad, 0xf9, 0x6e, 0xa7, 0x5c, 0xd8, 0x88, 0x71, 0x48, 0x12, + 0xb4, 0xf2, 0x8d, 0x81, 0x0a, 0xfa, 0xd6, 0x97, 0x40, 0x46, 0x8f, 0xd3, 0x64, 0xf4, 0xf7, 0x73, + 0xf4, 0x6b, 0x04, 0x15, 0xd9, 0xfd, 0xb4, 0x15, 0x0f, 0x1d, 0xa0, 0x29, 0x47, 0x35, 0x8d, 0x17, + 0x0d, 0x05, 0x7d, 0xfd, 0x1c, 0xd0, 0x9a, 0xeb, 0xe6, 0x75, 0x80, 0xa9, 0xe8, 0xcc, 0x49, 0x0f, + 0xaa, 0xf2, 0xc5, 0x24, 0x9a, 0xed, 0x3d, 0xf3, 0x26, 0xe5, 0xfc, 0x12, 0x06, 0xfa, 0x5f, 0xa8, + 0x10, 0x84, 0xec, 0xc4, 0xe5, 0x2e, 0xf3, 0x21, 0xd4, 0x63, 0xb5, 0xa4, 0x5d, 0x0a, 0xfb, 0xb1, + 0x8a, 0x24, 0xed, 0x70, 0x13, 0xa1, 0x80, 0x86, 0xd4, 0x03, 0x21, 0x4b, 0x90, 0x53, 0x25, 0xb8, + 0xff, 0xa1, 0x12, 0x24, 0xaf, 0x65, 0xee, 0xf7, 0x5d, 0xb7, 0x7c, 0x11, 0xb6, 0xe3, 0x14, 0x63, + 0x05, 0x49, 0xe0, 0xe3, 0x63, 0x34, 0x17, 0x82, 0xdd, 0xa4, 0xae, 0xa7, 0x99, 0x2d, 0xaf, 0xd2, + 0xdc, 0x92, 0x0c, 0x43, 0x92, 0x8a, 0xd3, 0x4e, 0xf9, 0xf6, 0xe0, 0xf7, 0x86, 0xb9, 0x0f, 0x21, + 0x77, 0xb9, 0x00, 0x5f, 0x44, 0x03, 0x9b, 0xf2, 0x21, 0x69, 0x6c, 0xf9, 0x76, 0x3c, 0xc9, 0xf9, + 0xcf, 0x02, 0xe1, 0x32, 0x9f, 0x17, 0x27, 0xe2, 0xb7, 0xb3, 0x9b, 0x90, 0x93, 0x94, 0x15, 0xde, + 0x41, 0xcb, 0x72, 0xcc, 0x3f, 0x8e, 0x02, 0x6c, 0xbd, 0x0e, 0xa8, 0x2f, 0x4b, 0x55, 0x9c, 0x54, + 0x74, 0x56, 0x94, 0x0b, 0x62, 0x63, 0x88, 0x9e, 0x0c, 0xf5, 0xc2, 0xff, 0x43, 0x8b, 0xd1, 0x86, + 0xb0, 0x5c, 0xdf, 0x71, 0xfd, 0x86, 0xdc, 0x0f, 0x8a, 0x19, 0x67, 0xac, 0x1b, 0x92, 0x19, 0x9f, + 0x67, 0x95, 0xa7, 0xc3, 0x84, 0x64, 0x10, 0x04, 0xbf, 0x42, 0x8b, 0x2a, 0x22, 0x38, 0x9a, 0x08, + 0x5c, 0xe0, 0xc5, 0xe9, 0x41, 0x7a, 0x97, 0xa5, 0x93, 0x83, 0xd4, 0xa3, 0x8b, 0x1a, 0x34, 0xc1, + 0x16, 0x2c, 0x3c, 0x80, 0xd0, 0xb3, 0xfe, 0xa6, 0xfb, 0xb5, 0xb8, 0x91, 0x85, 0x22, 0x83, 0xe8, + 0x2b, 0x0f, 0xd1, 0x7c, 0xa6, 0xe1, 0x78, 0x01, 0xe5, 0x8e, 0xa1, 0x1d, 0x11, 0x1d, 0x91, 0x3f, + 0xf1, 0x32, 0x9a, 0x38, 0xa1, 0xcd, 0x16, 0x44, 0x13, 0x48, 0xa2, 0xc3, 0x83, 0xf1, 0xfb, 0x46, + 0xe5, 0x07, 0x03, 0x2d, 0x24, 0xa7, 0xe7, 0x12, 0x68, 0x63, 0x37, 0x4d, 0x1b, 0x6b, 0xe7, 0x1d, + 0xec, 0x11, 0xdc, 0xf1, 0xa9, 0x81, 0x66, 0x93, 0x8b, 0x10, 0xdf, 0x44, 0xd3, 0xb4, 0xe5, 0xb8, + 0xe0, 0xdb, 0x3d, 0xb2, 0xef, 0x67, 0xb3, 0xa1, 0xe5, 0xa4, 0x6f, 0x21, 0xd7, 0x24, 0xbc, 0x0e, + 0xdc, 0x90, 0xca, 0x49, 0xab, 0x81, 0xcd, 0x7c, 0x87, 0xab, 0x32, 0xe5, 0xa2, 0x35, 0xb9, 0x95, + 0x55, 0x92, 0x41, 0xfb, 0xca, 0xd7, 0xe3, 0x68, 0x21, 0x1a, 0x90, 0xe8, 0x2b, 0xc9, 0x03, 0x5f, + 0x5c, 0x02, 0xbd, 0x90, 0xd4, 0xbe, 0xbc, 0x7d, 0xf6, 0x2e, 0x89, 0xb3, 0x1b, 0xb5, 0x38, 0xf1, + 0x0b, 0x34, 0xc9, 0x05, 0x15, 0x2d, 0xc9, 0x3b, 0x12, 0x75, 0xfd, 0x42, 0xa8, 0xca, 0x33, 0x5e, + 0x9c, 0xd1, 0x99, 0x68, 0xc4, 0xca, 0x8f, 0x06, 0x5a, 0xce, 0xba, 0x5c, 0xc2, 0xc0, 0xfd, 0x37, + 0x3d, 0x70, 0x37, 0x2f, 0x72, 0xa3, 0x11, 0x43, 0xf7, 0xb3, 0x81, 0xfe, 0x32, 0x70, 0x79, 0xb5, + 0xa2, 0x25, 0x57, 0x05, 0x19, 0x46, 0xdc, 0x8b, 0xbf, 0x3b, 0x14, 0x57, 0xed, 0x0f, 0xd1, 0x93, + 0xa1, 0x5e, 0xf8, 0x25, 0x5a, 0x70, 0xfd, 0xa6, 0xeb, 0x43, 0x24, 0xab, 0xc5, 0xed, 0x1e, 0x4a, + 0x28, 0x59, 0x64, 0xd5, 0xe6, 0xe5, 0x6e, 0xa7, 0xbc, 0xb0, 0x9d, 
0x41, 0x21, 0x03, 0xb8, 0x95, + 0x9f, 0x86, 0xb4, 0x47, 0xed, 0x63, 0xf9, 0xa2, 0x94, 0x04, 0xc2, 0x81, 0x17, 0xa5, 0xe5, 0xa4, + 0x6f, 0xa1, 0x26, 0x48, 0x95, 0x42, 0x27, 0x7a, 0xb1, 0x09, 0x52, 0x9e, 0x89, 0x09, 0x52, 0x67, + 0xa2, 0x11, 0x65, 0x26, 0xf2, 0x23, 0x4c, 0x15, 0x34, 0x97, 0xce, 0x64, 0x4f, 0xcb, 0x49, 0xdf, + 0xa2, 0xf2, 0x7b, 0x6e, 0x48, 0x97, 0xd4, 0x28, 0x26, 0xae, 0xd4, 0xfb, 0x93, 0x93, 0xbd, 0x92, + 0xd3, 0xbf, 0x92, 0x83, 0xbf, 0x34, 0x10, 0xa6, 0x7d, 0x88, 0xdd, 0xde, 0xa8, 0x46, 0xf3, 0xf4, + 0xe4, 0xe2, 0x2f, 0xc4, 0xdc, 0x18, 0x00, 0x8b, 0x76, 0xf5, 0x8a, 0x4e, 0x02, 0x0f, 0x1a, 0x90, + 0x21, 0x19, 0x60, 0x17, 0x15, 0x22, 0xe9, 0x56, 0x18, 0xb2, 0x50, 0x3f, 0xd9, 0x6b, 0x67, 0x27, + 0xa4, 0xcc, 0xad, 0x92, 0xfa, 0x98, 0x8c, 0xfd, 0x4f, 0x3b, 0xe5, 0x42, 0x42, 0x4f, 0x92, 0xd8, + 0x32, 0x94, 0x03, 0x71, 0xa8, 0xfc, 0x9f, 0x08, 0xf5, 0x08, 0x46, 0x87, 0x4a, 0x60, 0xaf, 0x6c, + 0xa1, 0xbf, 0x8e, 0x28, 0xd0, 0x85, 0x76, 0xdb, 0x67, 0x06, 0x4a, 0xc6, 0xc0, 0x3b, 0x28, 0x2f, + 0x5c, 0xfd, 0x12, 0x0b, 0xeb, 0x37, 0xce, 0xc7, 0x30, 0x07, 0xae, 0x07, 0x31, 0x51, 0xca, 0x13, + 0x51, 0x28, 0xf8, 0x3a, 0x9a, 0xf2, 0x80, 0x73, 0xda, 0xd0, 0x91, 0xe3, 0x2f, 0xcf, 0xdd, 0x48, + 0x4c, 0x7a, 0xfa, 0xca, 0x3d, 0xb4, 0x34, 0xe4, 0x5b, 0x1e, 0x97, 0xd1, 0x84, 0xad, 0xfe, 0x29, + 0xcb, 0x84, 0x26, 0xac, 0x19, 0xc9, 0x32, 0x9b, 0xea, 0x0f, 0x72, 0x24, 0xb7, 0x6e, 0xbd, 0x79, + 0x5f, 0x1a, 0x7b, 0xfb, 0xbe, 0x34, 0xf6, 0xee, 0x7d, 0x69, 0xec, 0x93, 0x6e, 0xc9, 0x78, 0xd3, + 0x2d, 0x19, 0x6f, 0xbb, 0x25, 0xe3, 0x5d, 0xb7, 0x64, 0xfc, 0xd2, 0x2d, 0x19, 0x9f, 0xff, 0x5a, + 0x1a, 0x7b, 0x31, 0xa5, 0xeb, 0xfd, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xb1, 0x9d, 0x65, + 0x9d, 0x12, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -721,6 +757,30 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RequiresRepublish != nil { + i-- + if *m.RequiresRepublish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.TokenRequests) > 0 { + for iNdEx := len(m.TokenRequests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TokenRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.FSGroupPolicy != nil { i -= len(*m.FSGroupPolicy) copy(dAtA[i:], *m.FSGroupPolicy) @@ -1107,6 +1167,39 @@ func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1503,6 +1596,15 @@ func (m *CSIDriverSpec) Size() (n int) { l = len(*m.FSGroupPolicy) n += 1 + l + 
sovGenerated(uint64(l)) } + if len(m.TokenRequests) > 0 { + for _, e := range m.TokenRequests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.RequiresRepublish != nil { + n += 2 + } return n } @@ -1635,6 +1737,20 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + func (m *VolumeAttachment) Size() (n int) { if m == nil { return 0 @@ -1787,12 +1903,19 @@ func (this *CSIDriverSpec) String() string { if this == nil { return "nil" } + repeatedStringForTokenRequests := "[]TokenRequest{" + for _, f := range this.TokenRequests { + repeatedStringForTokenRequests += strings.Replace(strings.Replace(f.String(), "TokenRequest", "TokenRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForTokenRequests += "}" s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `StorageCapacity:` + valueToStringGenerated(this.StorageCapacity) + `,`, `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, + `TokenRequests:` + repeatedStringForTokenRequests + `,`, + `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, `}`, }, "") return s @@ -1900,6 +2023,17 @@ func (this *StorageClassList) String() string { }, "") return s } +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *VolumeAttachment) String() string { if this == nil { return "nil" @@ -2104,10 +2238,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2224,10 +2355,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2399,16 +2527,68 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { s := FSGroupPolicy(dAtA[iNdEx:postIndex]) m.FSGroupPolicy = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenRequests = append(m.TokenRequests, TokenRequest{}) + if err := m.TokenRequests[len(m.TokenRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: 
+ if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresRepublish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RequiresRepublish = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2524,10 +2704,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2709,10 +2886,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2829,10 +3003,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2916,10 +3087,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3144,7 +3312,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -3314,10 +3482,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3434,10 +3599,109 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3586,10 +3850,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3706,10 +3967,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3828,10 +4086,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3978,10 +4233,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4161,7 +4413,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -4250,10 +4502,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4368,10 +4617,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4441,10 +4687,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + 
skippy) > l { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index e61876ed5..1eb4e1f88 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.api.storage.v1beta1; @@ -147,6 +147,43 @@ message CSIDriverSpec { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional optional string fsGroupPolicy = 5; + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. + // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + repeated TokenRequest tokenRequests = 6; + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + optional bool requiresRepublish = 7; } // DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. @@ -283,6 +320,19 @@ message StorageClassList { repeated StorageClass items = 2; } +// TokenRequest contains parameters of a service account token. +message TokenRequest { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + optional string audience = 1; + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" + // + // +optional + optional int64 expirationSeconds = 2; +} + // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. // diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 7946663a3..10df2baa7 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -364,6 +364,43 @@ type CSIDriverSpec struct { // that enable the CSIVolumeFSGroupPolicy feature gate. // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` + + // TokenRequests indicates the CSI driver needs pods' service account + // tokens it is mounting volume for to do necessary authentication. Kubelet + // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
+ // The CSI driver should parse and validate the following VolumeContext: + // "csi.storage.k8s.io/serviceAccount.tokens": { + // "": { + // "token": , + // "expirationTimestamp": , + // }, + // ... + // } + // + // Note: Audience in each TokenRequest should be different and at + // most one token is empty string. To receive a new token after expiry, + // RequiresRepublish can be used to trigger NodePublishVolume periodically. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + // +listType=atomic + TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` + + // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // being periodically called to reflect any possible change in the mounted + // volume. This field defaults to false. + // + // Note: After a successful initial NodePublishVolume call, subsequent calls + // to NodePublishVolume should only update the contents of the volume. New + // mount points will not be seen by a running container. + // + // This is an alpha feature and only available when the + // CSIServiceAccountToken feature is enabled. + // + // +optional + RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -395,6 +432,20 @@ const ( // provided by a CSI driver. More modes may be added in the future. type VolumeLifecycleMode string +// TokenRequest contains parameters of a service account token. +type TokenRequest struct { + // Audience is the intended audience of the token in "TokenRequestSpec". + // It will default to the audiences of kube apiserver. + // + Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` + + // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" + // + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` +} + const ( // VolumeLifecyclePersistent explicitly confirms that the driver implements // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 60cc4c6a4..c51950d7c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -54,6 +54,8 @@ var map_CSIDriverSpec = map[string]string{ "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis is an alpha field and only available when the CSIStorageCapacity feature is enabled. The default is false.", "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.", + "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", + "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is an alpha feature and only available when the CSIServiceAccountToken feature is enabled.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -127,6 +129,16 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_TokenRequest = map[string]string{ + "": "TokenRequest contains parameters of a service account token.", + "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index a1538c131..89a102901 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -113,6 +113,18 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(FSGroupPolicy) **out = **in } + if in.TokenRequests != nil { + in, out := &in.TokenRequests, &out.TokenRequests + *out = make([]TokenRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequiresRepublish != nil { + in, out := &in.RequiresRepublish, &out.RequiresRepublish + *out = new(bool) + **out = **in + } return } @@ -328,6 +340,27 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 934790dcb..343a6f550 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -51,6 +51,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond existingCondition.Reason = newCondition.Reason existingCondition.Message = newCondition.Message + existingCondition.ObservedGeneration = newCondition.ObservedGeneration } // RemoveStatusCondition removes the corresponding conditionType from conditions. 
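The conditions.go change above means SetStatusCondition now carries ObservedGeneration over when it updates an already-present condition, not only when it appends a new one. A minimal sketch of a controller using it (the condition type and reason are placeholders):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/meta"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        var conditions []metav1.Condition

        // The first call appends the condition; a later call with the same Type
        // updates it in place, now including ObservedGeneration as well.
        meta.SetStatusCondition(&conditions, metav1.Condition{
            Type:               "Ready",             // placeholder condition type
            Status:             metav1.ConditionTrue,
            Reason:             "ReconcileComplete", // placeholder reason
            Message:            "all replicas are available",
            ObservedGeneration: 7,
        })
        fmt.Println(conditions[0].ObservedGeneration)
    }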
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 9ca34c9fa..6a4116a04 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -40,8 +40,6 @@ func CommonAccessor(obj interface{}) (metav1.Common, error) { switch t := obj.(type) { case List: return t, nil - case metav1.ListInterface: - return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { return m, nil @@ -72,8 +70,6 @@ func ListAccessor(obj interface{}) (List, error) { switch t := obj.(type) { case List: return t, nil - case metav1.ListInterface: - return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { return m, nil diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 41b60d731..00bd86f51 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -65,6 +65,9 @@ type DefaultRESTMapper struct { } func (m *DefaultRESTMapper) String() string { + if m == nil { + return "" + } return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 18a6c7cd6..472104d54 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.api.resource; diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index d95e03aa9..8d718945d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -20,6 +20,7 @@ import ( "bytes" "errors" "fmt" + "math" "math/big" "strconv" "strings" @@ -120,7 +121,7 @@ const ( ) // MustParse turns the given string into a quantity or panics; for tests -// or others cases where you know the string is valid. +// or other cases where you know the string is valid. func MustParse(str string) Quantity { q, err := ParseQuantity(str) if err != nil { @@ -442,6 +443,36 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { } } +// AsApproximateFloat64 returns a float64 representation of the quantity which may +// lose precision. If the value of the quantity is outside the range of a float64 +// +Inf/-Inf will be returned. +func (q *Quantity) AsApproximateFloat64() float64 { + var base float64 + var exponent int + if q.d.Dec != nil { + base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64() + exponent = int(-q.d.Dec.Scale()) + } else { + base = float64(q.i.value) + exponent = int(q.i.scale) + } + if exponent == 0 { + return base + } + + // multiply by the appropriate exponential scale + switch q.Format { + case DecimalExponent, DecimalSI: + return base * math.Pow10(exponent) + default: + // fast path for exponents that can fit in 64 bits + if exponent > 0 && exponent < 7 { + return base * float64(int64(1)<<(exponent*10)) + } + return base * math.Pow(2, float64(exponent*10)) + } +} + // AsInt64 returns a representation of the current value as an int64 if a fast conversion // is possible. 
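AsApproximateFloat64, added in the quantity.go hunk above, gives callers a lossy float view of a Quantity without going through AsInt64 or the inf.Dec form. A quick usage sketch:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        cpu := resource.MustParse("1500m")      // 1.5 cores in decimal-SI notation
        fmt.Println(cpu.AsApproximateFloat64()) // 1.5

        // Precision may be lost; values outside the float64 range come back as +/-Inf.
        big := resource.MustParse("12e6")
        fmt.Println(big.AsApproximateFloat64()) // 1.2e+07
    }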
If false is returned, callers must use the inf.Dec form of this quantity. func (q *Quantity) AsInt64() (int64, bool) { @@ -598,6 +629,9 @@ const int64QuantityExpectedBytes = 18 // String is an expensive operation and caching this result significantly reduces the cost of // normal parse / marshal operations on Quantity. func (q *Quantity) String() string { + if q == nil { + return "" + } if len(q.s) == 0 { result := make([]byte, 0, int64QuantityExpectedBytes) number, suffix := q.CanonicalizeBytes(result) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 15b4c875a..40018601c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -26,6 +26,5 @@ reviewers: - mml - mbohlool - therc -- mqliang - kevin-wangzefeng - jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index e74a51099..e41a3901f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -4925,10 +4925,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5012,10 +5009,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5377,10 +5371,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5496,10 +5487,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5615,10 +5603,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5848,10 +5833,7 @@ func (m *Condition) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5965,10 +5947,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6160,10 +6139,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6232,10 +6208,7 @@ func (m 
*Duration) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6325,10 +6298,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6412,10 +6382,7 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6497,10 +6464,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6614,10 +6578,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6731,10 +6692,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6848,10 +6806,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6965,10 +6920,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7114,10 +7066,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7263,10 +7212,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7426,7 +7372,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -7477,10 +7423,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7626,10 +7569,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if err != nil 
{ return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7746,10 +7686,7 @@ func (m *List) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7915,10 +7852,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8207,10 +8141,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8460,10 +8391,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -8923,7 +8851,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -9050,7 +8978,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > postIndex { @@ -9199,10 +9127,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9422,10 +9347,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9508,10 +9430,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9628,10 +9547,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9681,10 +9597,7 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9819,10 +9732,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -9938,10 +9848,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10023,10 +9930,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10140,10 +10044,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10377,10 +10278,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10526,10 +10424,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10760,10 +10655,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10845,10 +10737,7 @@ func (m *TableOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -10936,10 +10825,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11053,10 +10939,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11170,10 +11053,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11255,10 +11135,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -11373,10 +11250,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index b72d43ff0..fd24483c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.apis.meta.v1; @@ -375,6 +375,7 @@ message GroupVersionResource { // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. +// +structType=atomic message LabelSelector { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go index bd4c6d9b5..54a0944af 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -34,6 +34,9 @@ type GroupResource struct { } func (gr *GroupResource) String() string { + if gr == nil { + return "" + } if len(gr.Group) == 0 { return gr.Resource } @@ -51,6 +54,9 @@ type GroupVersionResource struct { } func (gvr *GroupVersionResource) String() string { + if gvr == nil { + return "" + } return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -64,6 +70,9 @@ type GroupKind struct { } func (gk *GroupKind) String() string { + if gk == nil { + return "" + } if len(gk.Group) == 0 { return gk.Kind } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index ad989ad75..3c5a1518c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -201,6 +201,20 @@ func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { obj.Annotations[ann] = value } +// HasLabel returns a bool if passed in label exists +func HasLabel(obj ObjectMeta, label string) bool { + _, found := obj.Labels[label] + return found +} + +// SetMetaDataLabel sets the label and value +func SetMetaDataLabel(obj *ObjectMeta, label string, value string) { + if obj.Labels == nil { + obj.Labels = make(map[string]string) + } + obj.Labels[label] = value +} + // SingleObject returns a ListOptions for watching a single object. func SingleObject(meta ObjectMeta) ListOptions { return ListOptions{ diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index cdd9a6a7a..8eb37f436 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,8 +19,6 @@ package v1 import ( "encoding/json" "time" - - "github.com/google/gofuzz" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -181,16 +179,3 @@ func (t MicroTime) MarshalQueryParameter() (string, error) { return t.UTC().Format(RFC3339Micro), nil } - -// Fuzz satisfies fuzz.Interface. 
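The HasLabel and SetMetaDataLabel helpers added above mirror the existing annotation helpers in the same file. A small usage sketch (the object and label key are illustrative):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        obj := metav1.ObjectMeta{Name: "demo"} // hypothetical object metadata

        // SetMetaDataLabel initializes the Labels map if needed, like SetMetaDataAnnotation.
        metav1.SetMetaDataLabel(&obj, "app.kubernetes.io/name", "demo")

        fmt.Println(metav1.HasLabel(obj, "app.kubernetes.io/name")) // true
    }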
-func (t *MicroTime) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Accurate to a tenth of - // micro second. Leave off nanoseconds because JSON doesn't - // represent them so they can't round-trip properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) -} - -var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go new file mode 100644 index 000000000..befab16f7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -0,0 +1,39 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) +} + +// ensure MicroTime implements fuzz.Interface +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 4a1d89cfc..421770d43 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,8 +19,6 @@ package v1 import ( "encoding/json" "time" - - fuzz "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -182,16 +180,3 @@ func (t Time) MarshalQueryParameter() (string, error) { return t.UTC().Format(time.RFC3339), nil } - -// Fuzz satisfies fuzz.Interface. -func (t *Time) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Leave off nanoseconds - // because JSON doesn't represent them so they can't round-trip - // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) -} - -var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go new file mode 100644 index 000000000..94ad8d7cf --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -0,0 +1,39 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) +} + +// ensure Time implements fuzz.Interface +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index bb57f2cc4..d84878d7c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1092,6 +1092,7 @@ type Patch struct{} // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. +// +structType=atomic type LabelSelector struct { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 54a231e49..7b101ea51 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -282,14 +282,6 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return 0 - } - return val -} - func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { val, found, err := NestedInt64(obj, fields...) if !found || err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index cd5fc9026..a5a949679 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -311,10 +311,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 59ce74376..a209dd456 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.apis.meta.v1beta1; diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 838d5b0aa..791348476 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -26,16 +26,6 @@ type typePair struct { dest reflect.Type } -type typeNamePair struct { - fieldType reflect.Type - fieldName string -} - -// DebugLogger allows you to get debugging messages if necessary. -type DebugLogger interface { - Logf(format string, args ...interface{}) -} - type NameFunc func(t reflect.Type) string var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } @@ -57,24 +47,6 @@ type Converter struct { ignoredConversions map[typePair]struct{} ignoredUntypedConversions map[typePair]struct{} - // This is a map from a source field type and name, to a list of destination - // field type and name. - structFieldDests map[typeNamePair][]typeNamePair - - // Allows for the opposite lookup of structFieldDests. So that SourceFromDest - // copy flag also works. So this is a map of destination field name, to potential - // source field name and type to look for. - structFieldSources map[typeNamePair][]typeNamePair - - // Map from an input type to a function which can apply a key name mapping - inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc - - // Map from an input type to a set of default conversion flags. - inputDefaultFlags map[reflect.Type]FieldMatchingFlags - - // If non-nil, will be called to print helpful debugging info. Quite verbose. - Debug DebugLogger - // nameFunc is called to retrieve the name of a type; this name is used for the // purpose of deciding whether two types match or not (i.e., will we attempt to // do a conversion). The default returns the go type name. @@ -89,11 +61,6 @@ func NewConverter(nameFn NameFunc) *Converter { ignoredConversions: make(map[typePair]struct{}), ignoredUntypedConversions: make(map[typePair]struct{}), nameFunc: nameFn, - structFieldDests: make(map[typeNamePair][]typeNamePair), - structFieldSources: make(map[typeNamePair][]typeNamePair), - - inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), - inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), } c.RegisterUntypedConversionFunc( (*[]byte)(nil), (*[]byte)(nil), @@ -112,11 +79,9 @@ func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { return &copied } -// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. -func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { - return c.inputDefaultFlags[t], &Meta{ - KeyNameMapping: c.inputFieldMappingFuncs[t], - } +// DefaultMeta returns meta for a given type. +func (c *Converter) DefaultMeta(t reflect.Type) *Meta { + return &Meta{} } // Convert_Slice_byte_To_Slice_byte prevents recursing into every byte @@ -136,24 +101,12 @@ func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { type Scope interface { // Call Convert to convert sub-objects. Note that if you call it with your own exact // parameters, you'll run out of stack space before anything useful happens. - Convert(src, dest interface{}, flags FieldMatchingFlags) error - - // SrcTags and DestTags contain the struct tags that src and dest had, respectively. - // If the enclosing object was not a struct, then these will contain no tags, of course. 
- SrcTag() reflect.StructTag - DestTag() reflect.StructTag - - // Flags returns the flags with which the conversion was started. - Flags() FieldMatchingFlags + Convert(src, dest interface{}) error // Meta returns any information originally passed to Convert. Meta() *Meta } -// FieldMappingFunc can convert an input field value into different values, depending on -// the value of the source or destination struct tags. -type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) - func NewConversionFuncs() ConversionFuncs { return ConversionFuncs{ untyped: make(map[typePair]ConversionFunc), @@ -194,9 +147,6 @@ func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { // Meta is supplied by Scheme, when it calls Convert. type Meta struct { - // KeyNameMapping is an optional function which may map the listed key (field name) - // into a source and destination value. - KeyNameMapping FieldMappingFunc // Context is an optional field that callers may use to pass info to conversion functions. Context interface{} } @@ -205,84 +155,11 @@ type Meta struct { type scope struct { converter *Converter meta *Meta - flags FieldMatchingFlags - - // srcStack & destStack are separate because they may not have a 1:1 - // relationship. - srcStack scopeStack - destStack scopeStack -} - -type scopeStackElem struct { - tag reflect.StructTag - value reflect.Value - key string -} - -type scopeStack []scopeStackElem - -func (s *scopeStack) pop() { - n := len(*s) - *s = (*s)[:n-1] -} - -func (s *scopeStack) push(e scopeStackElem) { - *s = append(*s, e) -} - -func (s *scopeStack) top() *scopeStackElem { - return &(*s)[len(*s)-1] -} - -func (s scopeStack) describe() string { - desc := "" - if len(s) > 1 { - desc = "(" + s[1].value.Type().String() + ")" - } - for i, v := range s { - if i < 2 { - // First layer on stack is not real; second is handled specially above. - continue - } - if v.key == "" { - desc += fmt.Sprintf(".%v", v.value.Type()) - } else { - desc += fmt.Sprintf(".%v", v.key) - } - } - return desc -} - -// Formats src & dest as indices for printing. -func (s *scope) setIndices(src, dest int) { - s.srcStack.top().key = fmt.Sprintf("[%v]", src) - s.destStack.top().key = fmt.Sprintf("[%v]", dest) -} - -// Formats src & dest as map keys for printing. -func (s *scope) setKeys(src, dest interface{}) { - s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src) - s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest) } // Convert continues a conversion. -func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { - return s.converter.Convert(src, dest, flags, s.meta) -} - -// SrcTag returns the tag of the struct containing the current source item, if any. -func (s *scope) SrcTag() reflect.StructTag { - return s.srcStack.top().tag -} - -// DestTag returns the tag of the struct containing the current dest item, if any. -func (s *scope) DestTag() reflect.StructTag { - return s.destStack.top().tag -} - -// Flags returns the flags with which the current conversion was started. -func (s *scope) Flags() FieldMatchingFlags { - return s.flags +func (s *scope) Convert(src, dest interface{}) error { + return s.converter.Convert(src, dest, s.meta) } // Meta returns the meta object that was originally passed to Convert. @@ -290,50 +167,6 @@ func (s *scope) Meta() *Meta { return s.meta } -// describe prints the path to get to the current (source, dest) values. 
-func (s *scope) describe() (src, dest string) { - return s.srcStack.describe(), s.destStack.describe() -} - -// error makes an error that includes information about where we were in the objects -// we were asked to convert. -func (s *scope) errorf(message string, args ...interface{}) error { - srcPath, destPath := s.describe() - where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath) - return fmt.Errorf(where+message, args...) -} - -// Verifies whether a conversion function has a correct signature. -func verifyConversionFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got: %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got: %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) - } - scopeType := Scope(nil) - if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil. - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. @@ -364,71 +197,16 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { return nil } -// RegisterInputDefaults registers a field name mapping function, used when converting -// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping -// applied automatically if the input matches in. A set of default flags for the input conversion -// may also be provided, which will be used when no explicit flags are requested. -func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error { - fv := reflect.ValueOf(in) - ft := fv.Type() - if ft.Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer 'in' argument, got: %v", ft) - } - c.inputFieldMappingFuncs[ft] = fn - c.inputDefaultFlags[ft] = defaultFlags - return nil -} - -// FieldMatchingFlags contains a list of ways in which struct fields could be -// copied. These constants may be | combined. -type FieldMatchingFlags int - -const ( - // Loop through destination fields, search for matching source - // field to copy it from. Source fields with no corresponding - // destination field will be ignored. If SourceToDest is - // specified, this flag is ignored. If neither is specified, - // or no flags are passed, this flag is the default. - DestFromSource FieldMatchingFlags = 0 - // Loop through source fields, search for matching dest field - // to copy it into. Destination fields with no corresponding - // source field will be ignored. - SourceToDest FieldMatchingFlags = 1 << iota - // Don't treat it as an error if the corresponding source or - // dest field can't be found. - IgnoreMissingFields - // Don't require type names to match. 
- AllowDifferentFieldTypeNames -) - -// IsSet returns true if the given flag or combination of flags is set. -func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { - if flag == DestFromSource { - // The bit logic doesn't work on the default value. - return f&SourceToDest != SourceToDest - } - return f&flag == flag -} - // Convert will translate src to dest if it knows how. Both must be pointers. // If no conversion func is registered and the default copying mechanism // doesn't work on this type pair, an error will be returned. -// Read the comments on the various FieldMatchingFlags constants to understand -// what the 'flags' parameter does. // 'meta' is given to allow you to pass information to conversion functions, // it is not used by Convert() other than storing it in the scope. // Not safe for objects with cyclic references! -func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { - return c.doConversion(src, dest, flags, meta, c.convert) -} - -type conversionFunc func(sv, dv reflect.Value, scope *scope) error - -func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { +func (c *Converter) Convert(src, dest interface{}, meta *Meta) error { pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)} scope := &scope{ converter: c, - flags: flags, meta: meta, } @@ -452,366 +230,4 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags return err } return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type()) - - // TODO: Everything past this point is deprecated. - // Remove in 1.20 once we're sure it didn't break anything. - - // Leave something on the stack, so that calls to struct tag getters never fail. - scope.srcStack.push(scopeStackElem{}) - scope.destStack.push(scopeStackElem{}) - return f(sv, dv, scope) -} - -// callUntyped calls predefined conversion func. -func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error { - if !dv.CanAddr() { - return scope.errorf("cant addr dest") - } - var svPointer reflect.Value - if sv.CanAddr() { - svPointer = sv.Addr() - } else { - svPointer = reflect.New(sv.Type()) - svPointer.Elem().Set(sv) - } - dvPointer := dv.Addr() - return f(svPointer.Interface(), dvPointer.Interface(), scope) -} - -// convert recursively copies sv into dv, calling an appropriate conversion function if -// one is registered. -func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { - dt, st := dv.Type(), sv.Type() - pair := typePair{st, dt} - - // ignore conversions of this type - if _, ok := c.ignoredConversions[pair]; ok { - if c.Debug != nil { - c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt) - } - return nil - } - - // Convert sv to dv. - pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())} - if f, ok := c.conversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) - } - if f, ok := c.generatedConversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) - } - - if !dv.CanSet() { - return scope.errorf("Cannot set dest. 
(Tried to deep copy something with unexported fields?)") - } - - if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) { - return scope.errorf( - "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.", - c.nameFunc(st), c.nameFunc(dt), st, dt) - } - - switch st.Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - // Don't copy these via assignment/conversion! - default: - // This should handle all simple types. - if st.AssignableTo(dt) { - dv.Set(sv) - return nil - } - if st.ConvertibleTo(dt) { - dv.Set(sv.Convert(dt)) - return nil - } - } - - if c.Debug != nil { - c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt) - } - - scope.srcStack.push(scopeStackElem{value: sv}) - scope.destStack.push(scopeStackElem{value: dv}) - defer scope.srcStack.pop() - defer scope.destStack.pop() - - switch dv.Kind() { - case reflect.Struct: - return c.convertKV(toKVValue(sv), toKVValue(dv), scope) - case reflect.Slice: - if sv.IsNil() { - // Don't make a zero-length slice. - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) - for i := 0; i < sv.Len(); i++ { - scope.setIndices(i, i) - if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil { - return err - } - } - case reflect.Ptr: - if sv.IsNil() { - // Don't copy a nil ptr! - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.New(dt.Elem())) - switch st.Kind() { - case reflect.Ptr, reflect.Interface: - return c.convert(sv.Elem(), dv.Elem(), scope) - default: - return c.convert(sv, dv.Elem(), scope) - } - case reflect.Map: - if sv.IsNil() { - // Don't copy a nil ptr! - dv.Set(reflect.Zero(dt)) - return nil - } - dv.Set(reflect.MakeMap(dt)) - for _, sk := range sv.MapKeys() { - dk := reflect.New(dt.Key()).Elem() - if err := c.convert(sk, dk, scope); err != nil { - return err - } - dkv := reflect.New(dt.Elem()).Elem() - scope.setKeys(sk.Interface(), dk.Interface()) - // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false, - // because a map[string]struct{} does not allow a pointer reference. - // Calling a custom conversion function defined for the map value - // will panic. Example is PodInfo map[string]ContainerStatus. - if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil { - return err - } - dv.SetMapIndex(dk, dkv) - } - case reflect.Interface: - if sv.IsNil() { - // Don't copy a nil interface! - dv.Set(reflect.Zero(dt)) - return nil - } - tmpdv := reflect.New(sv.Elem().Type()).Elem() - if err := c.convert(sv.Elem(), tmpdv, scope); err != nil { - return err - } - dv.Set(reflect.ValueOf(tmpdv.Interface())) - return nil - default: - return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt) - } - return nil -} - -var stringType = reflect.TypeOf("") - -func toKVValue(v reflect.Value) kvValue { - switch v.Kind() { - case reflect.Struct: - return structAdaptor(v) - case reflect.Map: - if v.Type().Key().AssignableTo(stringType) { - return stringMapAdaptor(v) - } - } - - return nil -} - -// kvValue lets us write the same conversion logic to work with both maps -// and structs. Only maps with string keys make sense for this. -type kvValue interface { - // returns all keys, as a []string. - keys() []string - // Will just return "" for maps. - tagOf(key string) reflect.StructTag - // Will return the zero Value if the key doesn't exist. - value(key string) reflect.Value - // Maps require explicit setting-- will do nothing for structs. 
- // Returns false on failure. - confirmSet(key string, v reflect.Value) bool -} - -type stringMapAdaptor reflect.Value - -func (a stringMapAdaptor) len() int { - return reflect.Value(a).Len() -} - -func (a stringMapAdaptor) keys() []string { - v := reflect.Value(a) - keys := make([]string, v.Len()) - for i, v := range v.MapKeys() { - if v.IsNil() { - continue - } - switch t := v.Interface().(type) { - case string: - keys[i] = t - } - } - return keys -} - -func (a stringMapAdaptor) tagOf(key string) reflect.StructTag { - return "" -} - -func (a stringMapAdaptor) value(key string) reflect.Value { - return reflect.Value(a).MapIndex(reflect.ValueOf(key)) -} - -func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool { - return true -} - -type structAdaptor reflect.Value - -func (a structAdaptor) len() int { - v := reflect.Value(a) - return v.Type().NumField() -} - -func (a structAdaptor) keys() []string { - v := reflect.Value(a) - t := v.Type() - keys := make([]string, t.NumField()) - for i := range keys { - keys[i] = t.Field(i).Name - } - return keys -} - -func (a structAdaptor) tagOf(key string) reflect.StructTag { - v := reflect.Value(a) - field, ok := v.Type().FieldByName(key) - if ok { - return field.Tag - } - return "" -} - -func (a structAdaptor) value(key string) reflect.Value { - v := reflect.Value(a) - return v.FieldByName(key) -} - -func (a structAdaptor) confirmSet(key string, v reflect.Value) bool { - return true -} - -// convertKV can convert things that consist of key/value pairs, like structs -// and some maps. -func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error { - if skv == nil || dkv == nil { - // TODO: add keys to stack to support really understandable error messages. - return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv) - } - - lister := dkv - if scope.flags.IsSet(SourceToDest) { - lister = skv - } - - var mapping FieldMappingFunc - if scope.meta != nil && scope.meta.KeyNameMapping != nil { - mapping = scope.meta.KeyNameMapping - } - - for _, key := range lister.keys() { - if found, err := c.checkField(key, skv, dkv, scope); found { - if err != nil { - return err - } - continue - } - stag := skv.tagOf(key) - dtag := dkv.tagOf(key) - skey := key - dkey := key - if mapping != nil { - skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag) - } - - df := dkv.value(dkey) - sf := skv.value(skey) - if !df.IsValid() || !sf.IsValid() { - switch { - case scope.flags.IsSet(IgnoreMissingFields): - // No error. - case scope.flags.IsSet(SourceToDest): - return scope.errorf("%v not present in dest", dkey) - default: - return scope.errorf("%v not present in src", skey) - } - continue - } - scope.srcStack.top().key = skey - scope.srcStack.top().tag = stag - scope.destStack.top().key = dkey - scope.destStack.top().tag = dtag - if err := c.convert(sf, df, scope); err != nil { - return err - } - } - return nil -} - -// checkField returns true if the field name matches any of the struct -// field copying rules. The error should be ignored if it returns false. -func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) { - replacementMade := false - if scope.flags.IsSet(DestFromSource) { - df := dkv.value(fieldName) - if !df.IsValid() { - return false, nil - } - destKey := typeNamePair{df.Type(), fieldName} - // Check each of the potential source (type, name) pairs to see if they're - // present in sv. 
- for _, potentialSourceKey := range c.structFieldSources[destKey] { - sf := skv.value(potentialSourceKey.fieldName) - if !sf.IsValid() { - continue - } - if sf.Type() == potentialSourceKey.fieldType { - // Both the source's name and type matched, so copy. - scope.srcStack.top().key = potentialSourceKey.fieldName - scope.destStack.top().key = fieldName - if err := c.convert(sf, df, scope); err != nil { - return true, err - } - dkv.confirmSet(fieldName, df) - replacementMade = true - } - } - return replacementMade, nil - } - - sf := skv.value(fieldName) - if !sf.IsValid() { - return false, nil - } - srcKey := typeNamePair{sf.Type(), fieldName} - // Check each of the potential dest (type, name) pairs to see if they're - // present in dv. - for _, potentialDestKey := range c.structFieldDests[srcKey] { - df := dkv.value(potentialDestKey.fieldName) - if !df.IsValid() { - continue - } - if df.Type() == potentialDestKey.fieldType { - // Both the dest's name and type matched, so copy. - scope.srcStack.top().key = fieldName - scope.destStack.top().key = potentialDestKey.fieldName - if err := c.convert(sf, df, scope); err != nil { - return true, err - } - dkv.confirmSet(potentialDestKey.fieldName, df) - replacementMade = true - } - } - return replacementMade, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index d9eeb4f91..d6bbeeaca 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -141,25 +141,6 @@ func Equals(labels1, labels2 Set) bool { return true } -// AreLabelsInWhiteList verifies if the provided label list -// is in the provided whitelist and returns true, otherwise false. -func AreLabelsInWhiteList(labels, whitelist Set) bool { - if len(whitelist) == 0 { - return true - } - - for k, v := range labels { - value, ok := whitelist[k] - if !ok { - return false - } - if value != v { - return false - } - } - return true -} - // ConvertSelectorToLabelsMap converts selector string to labels map // and validates keys and values func ConvertSelectorToLabelsMap(selector string) (Set, error) { diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index bf62f98a4..50ae4f7ce 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -263,11 +263,11 @@ func (r *Requirement) Values() sets.String { } // Empty returns true if the internalSelector doesn't restrict selection space -func (lsel internalSelector) Empty() bool { - if lsel == nil { +func (s internalSelector) Empty() bool { + if s == nil { return true } - return len(lsel) == 0 + return len(s) == 0 } // String returns a human-readable string that represents this @@ -330,51 +330,51 @@ func safeSort(in []string) []string { } // Add adds requirements to the selector. It copies the current selector returning a new one -func (lsel internalSelector) Add(reqs ...Requirement) Selector { - var sel internalSelector - for ix := range lsel { - sel = append(sel, lsel[ix]) +func (s internalSelector) Add(reqs ...Requirement) Selector { + var ret internalSelector + for ix := range s { + ret = append(ret, s[ix]) } for _, r := range reqs { - sel = append(sel, r) + ret = append(ret, r) } - sort.Sort(ByKey(sel)) - return sel + sort.Sort(ByKey(ret)) + return ret } // Matches for a internalSelector returns true if all // its Requirements match the input Labels. If any // Requirement does not match, false is returned. 
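With the converter.go changes above, the FieldMatchingFlags machinery and the reflection-based struct copying are gone: Converter.Convert now takes only (src, dest, meta), Scope.Convert takes only (src, dest), and every type pair must be registered explicitly. A minimal sketch under those assumptions (the In and Out types are placeholders):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/conversion"
    )

    type In struct{ Name string }
    type Out struct{ Name string }

    func main() {
        c := conversion.NewConverter(conversion.DefaultNameFunc)

        // Only registered conversions work now; the deprecated reflection fallback was removed.
        _ = c.RegisterUntypedConversionFunc((*In)(nil), (*Out)(nil),
            func(a, b interface{}, s conversion.Scope) error {
                in, out := a.(*In), b.(*Out)
                out.Name = in.Name
                return nil
            })

        var out Out
        // Note the dropped FieldMatchingFlags argument.
        if err := c.Convert(&In{Name: "x"}, &out, nil); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(out.Name)
    }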
-func (lsel internalSelector) Matches(l Labels) bool { - for ix := range lsel { - if matches := lsel[ix].Matches(l); !matches { +func (s internalSelector) Matches(l Labels) bool { + for ix := range s { + if matches := s[ix].Matches(l); !matches { return false } } return true } -func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } +func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true } // String returns a comma-separated string of all // the internalSelector Requirements' human-readable strings. -func (lsel internalSelector) String() string { +func (s internalSelector) String() string { var reqs []string - for ix := range lsel { - reqs = append(reqs, lsel[ix].String()) + for ix := range s { + reqs = append(reqs, s[ix].String()) } return strings.Join(reqs, ",") } // RequiresExactMatch introspect whether a given selector requires a single specific field // to be set, and if so returns the value it requires. -func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { - for ix := range lsel { - if lsel[ix].key == label { - switch lsel[ix].operator { +func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range s { + if s[ix].key == label { + switch s[ix].operator { case selection.Equals, selection.DoubleEquals, selection.In: - if len(lsel[ix].strValues) == 1 { - return lsel[ix].strValues[0], true + if len(s[ix].strValues) == 1 { + return s[ix].strValues[0], true } } return "", false @@ -789,12 +789,12 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) { // parseExactValue parses the only value for exact match style func (p *Parser) parseExactValue() (sets.String, error) { s := sets.NewString() - tok, lit := p.lookahead(Values) + tok, _ := p.lookahead(Values) if tok == EndOfStringToken || tok == CommaToken { s.Insert("") return s, nil } - tok, lit = p.consume(Values) + tok, lit := p.consume(Values) if tok == IdentifierToken { s.Insert(lit) return s, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 871e4c8c4..4a6cc6857 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -186,6 +186,9 @@ func fromUnstructured(sv, dv reflect.Value) error { reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: dv.Set(sv.Convert(dt)) return nil + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil } case reflect.Float32, reflect.Float64: switch dt.Kind() { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 071971817..ac428d610 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -450,10 +450,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -567,10 +564,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -751,10 +745,7 @@ func (m *Unknown) 
Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 0e212ec94..3b25391fa 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index f44693c0c..3e1fab1d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -57,7 +57,7 @@ type Encoder interface { // Identifiers of two different encoders should be equal if and only if for every input // object it will be encoded to the same representation by both of them. // - // Identifier is inteted for use with CacheableObject#CacheEncode method. In order to + // Identifier is intended for use with CacheableObject#CacheEncode method. In order to // correctly handle CacheableObject, Encode() method should look similar to below, where // doEncode() is the encoding logic of implemented encoder: // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go index 159b30120..3ab119b0a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -92,39 +92,6 @@ func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion } } -// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver -// where objects are converted to gv prior to sending and decoded to their internal representation prior -// to retrieval. -// -// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. -func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { - decode := schema.GroupVersions{ - { - Group: gv.Group, - Version: APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: APIVersionInternal, - }, - } - return &clientNegotiator{ - encode: gv, - decode: decode, - serializer: serializer, - } -} - -// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used -// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. 
-func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { - return &clientNegotiator{ - serializer: &simpleNegotiatedSerializer{info: info}, - encode: gv, - } -} - type simpleNegotiatedSerializer struct { info SerializerInfo } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index 5aeeaa100..c50766a4b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime.schema; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go index b57066845..f04453fb0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -23,8 +23,8 @@ type ObjectKind interface { // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil // should clear the current setting. SetGroupVersionKind(kind GroupVersionKind) - // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does - // not expose or provide these fields. + // GroupVersionKind returns the stored group, version, and kind of an object, or an empty struct + // if the object does not expose or provide these fields. GroupVersionKind() GroupVersionKind } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 3b254961d..697dd4ed7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -18,7 +18,6 @@ package runtime import ( "fmt" - "net/url" "reflect" "strings" @@ -105,9 +104,6 @@ func NewScheme() *Scheme { // Enable couple default conversions by default. utilruntime.Must(RegisterEmbeddedConversions(s)) utilruntime.Must(RegisterStringConversions(s)) - - utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) - utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) return s } @@ -309,11 +305,6 @@ func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { return nil, NewNotRegisteredErrForKind(s.schemeName, kind) } -// Log sets a logger on the scheme. For test purposes only -func (s *Scheme) Log(l conversion.DebugLogger) { - s.converter.Debug = l -} - // AddIgnoredConversionType identifies a pair of types that should be skipped by // conversion (because the data inside them is explicitly dropped during // conversion). @@ -342,14 +333,6 @@ func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conver return nil } -// RegisterInputDefaults sets the provided field mapping function and field matching -// as the defaults for the provided input type. The fn may be nil, in which case no -// mapping will happen by default. Use this method to register a mechanism for handling -// a specific input type in conversion, such as a map[string]string to structs. 
-func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { - return s.converter.RegisterInputDefaults(in, fn, defaultFlags) -} - // AddTypeDefaultingFunc registers a function that is passed a pointer to an // object and can default fields on the object. These functions will be invoked // when Default() is called. The function will never be called unless the @@ -433,12 +416,9 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { in = typed } - flags, meta := s.generateConvertMeta(in) + meta := s.generateConvertMeta(in) meta.Context = context - if flags == 0 { - flags = conversion.AllowDifferentFieldTypeNames - } - return s.converter.Convert(in, out, flags, meta) + return s.converter.Convert(in, out, meta) } // ConvertFieldLabel alters the given field label and value for an kind field selector from @@ -535,9 +515,9 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( in = in.DeepCopyObject() } - flags, meta := s.generateConvertMeta(in) + meta := s.generateConvertMeta(in) meta.Context = target - if err := s.converter.Convert(in, out, flags, meta); err != nil { + if err := s.converter.Convert(in, out, meta); err != nil { return nil, err } @@ -565,7 +545,7 @@ func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { } // generateConvertMeta constructs the meta value we pass to Convert. -func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { +func (s *Scheme) generateConvertMeta(in interface{}) *conversion.Meta { return s.converter.DefaultMeta(reflect.TypeOf(in)) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index f21b0ef19..e55ab94d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -108,10 +108,9 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option // CodecFactory provides methods for retrieving codecs and serializers for specific // versions and content types. 
type CodecFactory struct { - scheme *runtime.Scheme - serializers []serializerType - universal runtime.Decoder - accepts []runtime.SerializerInfo + scheme *runtime.Scheme + universal runtime.Decoder + accepts []runtime.SerializerInfo legacySerializer runtime.Serializer } @@ -216,9 +215,8 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } return CodecFactory{ - scheme: scheme, - serializers: serializers, - universal: recognizer.NewDecoder(decoders...), + scheme: scheme, + universal: recognizer.NewDecoder(decoders...), accepts: accepts, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index e081d7ff1..83b2e1393 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -96,6 +96,7 @@ type SerializerOptions struct { Strict bool } +// Serializer handles encoding versioned objects into the proper JSON form type Serializer struct { meta MetaFactory options SerializerOptions @@ -144,10 +145,10 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { } } -// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be // case-sensitive when unmarshalling, and otherwise compatible with // the encoding/json standard library. -func CaseSensitiveJsonIterator() jsoniter.API { +func CaseSensitiveJSONIterator() jsoniter.API { config := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, @@ -159,10 +160,10 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be // case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with // the encoding/json standard library. -func StrictCaseSensitiveJsonIterator() jsoniter.API { +func StrictCaseSensitiveJSONIterator() jsoniter.API { config := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, @@ -179,8 +180,8 @@ func StrictCaseSensitiveJsonIterator() jsoniter.API { // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. 
// See https://github.com/json-iterator/go/issues/265 -var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() -var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() +var caseSensitiveJSONIterator = CaseSensitiveJSONIterator() +var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -236,7 +237,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil { + if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil { return nil, actual, err } return into, actual, nil @@ -260,7 +261,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { + if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } @@ -285,7 +286,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, // the actual error is that the object contains unknown field. strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil { return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) } // Always return the same object as the non-strict serializer to avoid any deviations. 
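Note: the hunk above only renames the exported helpers (CaseSensitiveJsonIterator -> CaseSensitiveJSONIterator, plus the strict variant), so any caller importing them by the old spelling needs the new name after this vendor bump. A minimal, hypothetical usage sketch of the renamed helper, assuming the apimachinery and jsoniter versions pinned by this go.mod:

package main

import (
	"fmt"

	runtimejson "k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	// Returns a jsoniter.API that matches encoding/json behaviour but is
	// case-sensitive when matching JSON keys to struct tags.
	iter := runtimejson.CaseSensitiveJSONIterator()

	var out struct {
		Kind string `json:"kind"`
	}
	// "KIND" does not match the "kind" tag case-sensitively, so Kind stays empty
	// (this is the non-strict variant, so the unknown key is simply ignored).
	if err := iter.Unmarshal([]byte(`{"KIND":"Pod"}`), &out); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Printf("kind=%q\n", out.Kind) // kind=""
}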
@@ -302,7 +303,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { if s.options.Yaml { - json, err := caseSensitiveJsonIterator.Marshal(obj) + json, err := caseSensitiveJSONIterator.Marshal(obj) if err != nil { return err } @@ -315,7 +316,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { } if s.options.Pretty { - data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") + data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ") if err != nil { return err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index f606b7d72..404fb1b7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -61,6 +61,7 @@ func (e errNotMarshalable) Status() metav1.Status { } } +// IsNotMarshalable checks the type of error, returns a boolean true if error is not nil and not marshalable false otherwise func IsNotMarshalable(err error) bool { _, ok := err.(errNotMarshalable) return err != nil && ok @@ -77,6 +78,7 @@ func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Se } } +// Serializer handles encoding versioned objects into the proper wire form type Serializer struct { prefix []byte creater runtime.ObjectCreater @@ -457,8 +459,10 @@ func (s *RawSerializer) Identifier() runtime.Identifier { return rawSerializerIdentifier } +// LengthDelimitedFramer is exported variable of type lengthDelimitedFramer var LengthDelimitedFramer = lengthDelimitedFramer{} +// Provides length delimited frame reader and writer methods type lengthDelimitedFramer struct{} // NewFrameWriter implements stream framing for this serializer diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 88f0de36d..b19750f3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -16,10 +16,6 @@ limitations under the License. package types -import ( - "fmt" -) - // NamespacedName comprises a resource name, with a mandatory namespace, // rendered as "/". Being a type captures intent and // helps make sure that UIDs, namespaced names and non-namespaced names @@ -39,5 +35,5 @@ const ( // String returns the general purpose string representation func (n NamespacedName) String() string { - return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) + return n.Namespace + string(Separator) + n.Name } diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 84b4f5884..faf2c2645 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -145,7 +145,7 @@ func (c *Expiring) gc(now time.Time) { // expired. // // heap[0] is a peek at the next element in the heap, which is not obvious - // from looking at the (*expiringHeap).Pop() implmentation below. + // from looking at the (*expiringHeap).Pop() implementation below. // heap.Pop() swaps the first entry with the last entry of the heap, then // calls (*expiringHeap).Pop() which returns the last element. 
if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 6cf13d83d..3e1e2517b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -348,7 +348,13 @@ func (f *fakeTimer) Stop() bool { // Reset conditionally updates the firing time of the timer. If the // timer has neither fired nor been stopped then this call resets the // timer to the fake clock's "now" + d and returns true, otherwise -// this call returns false. This is like time.Timer::Reset. +// it creates a new waiter, adds it to the clock, and returns true. +// +// It is not possible to return false, because a fake timer can be reset +// from any state (waiting to fire, already fired, and stopped). +// +// See the GoDoc for time.Timer::Reset for more context on why +// the return value of Reset() is not useful. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() @@ -360,7 +366,15 @@ func (f *fakeTimer) Reset(d time.Duration) bool { return true } } - return false + // No existing waiter, timer has already fired or been reset. + // We should still enable Reset() to succeed by creating a + // new waiter and adding it to the clock's waiters. + newWaiter := fakeClockWaiter{ + targetTime: f.fakeClock.time.Add(d), + destChan: seekChan, + } + f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter) + return true } // Ticker defines the Ticker interface diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 5bafc218e..1f5a04fd4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -163,7 +163,7 @@ func matchesError(err error, fns ...Matcher) bool { // filterErrors returns any errors (or nested errors, if the list contains // nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all +// remain a nil list is returned. The resulting slice will have all // nested slices flattened as a side effect. func filterErrors(list []error, fns ...Matcher) []error { result := []error{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 066680f44..45aa74bf5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -132,12 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // Return whatever remaining data exists from an in progress frame if n := len(r.remaining); n > 0 { if n <= len(data) { + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining...) r.remaining = nil return n, nil } n = len(data) + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining[:n]...) r.remaining = r.remaining[n:] return n, io.ErrShortBuffer @@ -155,6 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. if len(m) > n { + //lint:ignore SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], m[:n]...) 
r.remaining = m[n:] return n, io.ErrShortBuffer diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index ec1cb70f2..a4792034a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -268,10 +268,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index e79fb9e57..a76f79851 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -17,7 +17,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package k8s.io.apimachinery.pkg.util.intstr; diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go new file mode 100644 index 000000000..2501d5516 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -0,0 +1,42 @@ +// +build !notest + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package intstr + +import ( + fuzz "github.com/google/gofuzz" +) + +// Fuzz satisfies fuzz.Interface +func (intstr *IntOrString) Fuzz(c fuzz.Continue) { + if intstr == nil { + return + } + if c.RandBool() { + intstr.Type = Int + c.Fuzz(&intstr.IntVal) + intstr.StrVal = "" + } else { + intstr.Type = String + intstr.IntVal = 0 + c.Fuzz(&intstr.StrVal) + } +} + +// ensure IntOrString implements fuzz.Interface +var _ fuzz.Interface = &IntOrString{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 6576def82..c0e8927fe 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,7 +25,6 @@ import ( "strconv" "strings" - "github.com/google/gofuzz" "k8s.io/klog/v2" ) @@ -90,6 +89,9 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { + if intstr == nil { + return "" + } if intstr.Type == String { return intstr.StrVal } @@ -129,21 +131,6 @@ func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // the OpenAPI spec of this type. 
func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } -func (intstr *IntOrString) Fuzz(c fuzz.Continue) { - if intstr == nil { - return - } - if c.RandBool() { - intstr.Type = Int - c.Fuzz(&intstr.IntVal) - intstr.StrVal = "" - } else { - intstr.Type = String - intstr.IntVal = 0 - c.Fuzz(&intstr.StrVal) - } -} - func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { if intOrPercent == nil { return &defaultValue @@ -151,6 +138,33 @@ func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrS return intOrPercent } +// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. +// This method returns a scaled value from an IntOrString type. If the IntOrString +// is a percentage string value it's treated as a percentage and scaled appropriately +// in accordance to the total, if it's an int value it's treated as a a simple value and +// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. +func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValueSafely(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// GetValueFromIntOrPercent was deprecated in favor of +// GetScaledValueFromIntOrPercent. This method was treating all int as a numeric value and all +// strings with or without a percent symbol as a percentage value. 
+// Deprecated func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { if intOrPercent == nil { return 0, errors.New("nil value for IntOrString") @@ -169,6 +183,8 @@ func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool return value, nil } +// getIntOrPercentValue is a legacy function and only meant to be called by GetValueFromIntOrPercent +// For a more correct implementation call getIntOrPercentSafely func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { switch intOrStr.Type { case Int: @@ -183,3 +199,25 @@ func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { } return 0, false, fmt.Errorf("invalid type: neither int nor percentage") } + +func getIntOrPercentValueSafely(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + isPercent := false + s := intOrStr.StrVal + if strings.HasSuffix(s, "%") { + isPercent = true + s = strings.TrimSuffix(intOrStr.StrVal, "%") + } else { + return 0, false, fmt.Errorf("invalid type: string is not a percentage") + } + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), isPercent, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 204834883..778e58f70 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -39,7 +39,8 @@ func Marshal(v interface{}) ([]byte, error) { const maxDepth = 10000 // Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers +// are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { switch v := v.(type) { case *map[string]interface{}: @@ -52,7 +53,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v, 0) + return ConvertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -64,7 +65,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v, 0) + return ConvertSliceNumbers(*v, 0) case *interface{}: // Build a decoder from the given data @@ -76,29 +77,31 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertInterfaceNumbers(v, 0) + return ConvertInterfaceNumbers(v, 0) default: return json.Unmarshal(data, v) } } -func convertInterfaceNumbers(v *interface{}, depth int) error { +// ConvertInterfaceNumbers converts any json.Number values to int64 or float64. 
+// Values which are map[string]interface{} or []interface{} are recursively visited +func ConvertInterfaceNumbers(v *interface{}, depth int) error { var err error switch v2 := (*v).(type) { case json.Number: *v, err = convertNumber(v2) case map[string]interface{}: - err = convertMapNumbers(v2, depth+1) + err = ConvertMapNumbers(v2, depth+1) case []interface{}: - err = convertSliceNumbers(v2, depth+1) + err = ConvertSliceNumbers(v2, depth+1) } return err } -// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// ConvertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}, depth int) error { +func ConvertMapNumbers(m map[string]interface{}, depth int) error { if depth > maxDepth { return fmt.Errorf("exceeded max depth of %d", maxDepth) } @@ -109,9 +112,9 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = ConvertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = ConvertSliceNumbers(v, depth+1) } if err != nil { return err @@ -120,9 +123,9 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { return nil } -// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// ConvertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}, depth int) error { +func ConvertSliceNumbers(s []interface{}, depth int) error { if depth > maxDepth { return fmt.Errorf("exceeded max depth of %d", maxDepth) } @@ -133,9 +136,9 @@ func convertSliceNumbers(s []interface{}, depth int) error { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = ConvertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = ConvertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go index 7b6eca893..42ecffcca 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -130,7 +130,7 @@ func (*PortRange) Type() string { } // ParsePortRange parses a string of the form "min-max", inclusive at both -// ends, and initializs a new PortRange from it. +// ends, and initializes a new PortRange from it. func ParsePortRange(value string) (*PortRange, error) { pr := &PortRange{} err := pr.Set(value) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index e8a9f609f..035c52811 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -79,7 +79,7 @@ func logPanic(r interface{}) { } } -// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// ErrorHandlers is a list of functions which will be invoked when a nonreturnable // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. 
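Note: the k8s.io/apimachinery/pkg/util/json hunks above export the former convertMapNumbers/convertSliceNumbers/convertInterfaceNumbers helpers (ConvertMapNumbers and friends) so the new YAML Unmarshal added later in this patch can reuse them. A small sketch of the behaviour they back through utiljson.Unmarshal, under the vendored apimachinery version; the literal JSON is made up for illustration:

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var obj map[string]interface{}
	// Unlike plain encoding/json (which decodes every number as float64),
	// this helper post-processes json.Number values so whole numbers come
	// back as int64 and fractional ones as float64.
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &obj); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", obj["replicas"], obj["ratio"]) // int64 float64
}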
@@ -165,7 +165,7 @@ func RecoverFromPanic(err *error) { } } -// Must panics on non-nil errors. Useful to handling programmer level errors. +// Must panics on non-nil errors. Useful to handling programmer level errors. func Must(err error) { if err != nil { panic(err) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go index 2efc8eec7..f9be7ac33 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -67,6 +67,9 @@ func (p *Path) Key(key string) *Path { // String produces a string representation of the Path. func (p *Path) String() string { + if p == nil { + return "" + } // make a slice to iterate elems := []*Path{} for ; p != nil; p = p.parent { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 4752b29a9..c8b419984 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -175,7 +175,7 @@ func IsValidLabelValue(value string) []string { } const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" // DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 @@ -196,7 +196,7 @@ func IsDNS1123Label(value string) []string { } const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" // DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 @@ -347,7 +347,7 @@ func IsValidPortName(port string) []string { // IsValidIP tests that the argument is a valid IP address. func IsValidIP(value string) []string { if net.ParseIP(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index d759d912b..3dea7fe7f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -604,3 +604,32 @@ func poller(interval, timeout time.Duration) WaitFunc { return ch }) } + +// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never +// exceeds the deadline specified by the request context. 
+func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 492171faf..7fe706467 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,10 +26,41 @@ import ( "strings" "unicode" + jsonutil "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog/v2" "sigs.k8s.io/yaml" ) +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers +// are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + preserveIntFloat := func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + } + switch v := v.(type) { + case *map[string]interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertMapNumbers(*v, 0) + case *[]interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertSliceNumbers(*v, 0) + case *interface{}: + if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertInterfaceNumbers(v, 0) + default: + return yaml.Unmarshal(data, v) + } +} + // ToJSON converts a single YAML document into a JSON document // or returns an error. If the document appears to be JSON the // YAML decoding path is not used (so that error messages are diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index 0ac8dc4ef..0aaf01adc 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -40,15 +40,12 @@ const incomingQueueLength = 25 // Broadcaster distributes event notifications among any number of watchers. Every event // is delivered to every watcher. type Broadcaster struct { - // TODO: see if this lock is needed now that new watchers go through - // the incoming channel. - lock sync.Mutex - watchers map[int64]*broadcasterWatcher nextWatcher int64 distributing sync.WaitGroup incoming chan Event + stopped chan struct{} // How large to make watcher's channel. watchQueueLength int @@ -68,6 +65,7 @@ func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *B m := &Broadcaster{ watchers: map[int64]*broadcasterWatcher{}, incoming: make(chan Event, incomingQueueLength), + stopped: make(chan struct{}), watchQueueLength: queueLength, fullChannelBehavior: fullChannelBehavior, } @@ -96,10 +94,15 @@ func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { // The purpose of this terrible hack is so that watchers added after an event // won't ever see that event, and will always see any event after they are // added. 
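Note: the wait.go hunk above adds ExponentialBackoffWithContext, which behaves like ExponentialBackoff but gives up as soon as the caller's context is done. A hedged usage sketch, assuming the Backoff fields exposed by the vendored package (Duration, Factor, Steps); the condition function below is a stand-in for a real retryable operation:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial wait between attempts
		Factor:   2.0,                    // grow the wait by this factor each step
		Steps:    5,                      // at most 5 attempts
	}

	attempts := 0
	err := wait.ExponentialBackoffWithContext(ctx, backoff, func() (bool, error) {
		attempts++
		return attempts == 3, nil // pretend the third attempt succeeds
	})
	fmt.Println("attempts:", attempts, "err:", err)
}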
-func (b *Broadcaster) blockQueue(f func()) { +func (m *Broadcaster) blockQueue(f func()) { + select { + case <-m.stopped: + return + default: + } var wg sync.WaitGroup wg.Add(1) - b.incoming <- Event{ + m.incoming <- Event{ Type: internalRunFunctionMarker, Object: functionFakeRuntimeObject(func() { defer wg.Done() @@ -111,12 +114,11 @@ func (b *Broadcaster) blockQueue(f func()) { // Watch adds a new watcher to the list and returns an Interface for it. // Note: new watchers will only receive new events. They won't get an entire history -// of previous events. +// of previous events. It will block until the watcher is actually added to the +// broadcaster. func (m *Broadcaster) Watch() Interface { var w *broadcasterWatcher m.blockQueue(func() { - m.lock.Lock() - defer m.lock.Unlock() id := m.nextWatcher m.nextWatcher++ w = &broadcasterWatcher{ @@ -127,18 +129,22 @@ func (m *Broadcaster) Watch() Interface { } m.watchers[id] = w }) + if w == nil { + // The panic here is to be consistent with the previous interface behavior + // we are willing to re-evaluate in the future. + panic("broadcaster already stopped") + } return w } // WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends // queuedEvents down the new watch before beginning to send ordinary events from Broadcaster. // The returned watch will have a queue length that is at least large enough to accommodate -// all of the items in queuedEvents. +// all of the items in queuedEvents. It will block until the watcher is actually added to +// the broadcaster. func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { var w *broadcasterWatcher m.blockQueue(func() { - m.lock.Lock() - defer m.lock.Unlock() id := m.nextWatcher m.nextWatcher++ length := m.watchQueueLength @@ -156,26 +162,29 @@ func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { w.result <- e } }) + if w == nil { + // The panic here is to be consistent with the previous interface behavior + // we are willing to re-evaluate in the future. + panic("broadcaster already stopped") + } return w } // stopWatching stops the given watcher and removes it from the list. func (m *Broadcaster) stopWatching(id int64) { - m.lock.Lock() - defer m.lock.Unlock() - w, ok := m.watchers[id] - if !ok { - // No need to do anything, it's already been removed from the list. - return - } - delete(m.watchers, id) - close(w.result) + m.blockQueue(func() { + w, ok := m.watchers[id] + if !ok { + // No need to do anything, it's already been removed from the list. + return + } + delete(m.watchers, id) + close(w.result) + }) } // closeAll disconnects all watchers (presumably in response to a Shutdown call). func (m *Broadcaster) closeAll() { - m.lock.Lock() - defer m.lock.Unlock() for _, w := range m.watchers { close(w.result) } @@ -194,9 +203,12 @@ func (m *Broadcaster) Action(action EventType, obj runtime.Object) { // until all events have been distributed through the outbound channels. Note // that since they can be buffered, this means that the watchers might not // have received the data yet as it can remain sitting in the buffered -// channel. +// channel. It will block until the broadcaster stop request is actually executed func (m *Broadcaster) Shutdown() { - close(m.incoming) + m.blockQueue(func() { + close(m.stopped) + close(m.incoming) + }) m.distributing.Wait() } @@ -217,8 +229,6 @@ func (m *Broadcaster) loop() { // distribute sends event to all watchers. Blocking. 
func (m *Broadcaster) distribute(event Event) { - m.lock.Lock() - defer m.lock.Unlock() if m.fullChannelBehavior == DropIfChannelFull { for _, w := range m.watchers { select { @@ -252,6 +262,7 @@ func (mw *broadcasterWatcher) ResultChan() <-chan Event { } // Stop stops watching and removes mw from its list. +// It will block until the watcher stop request is actually executed func (mw *broadcasterWatcher) Stop() { mw.stop.Do(func() { close(mw.stopped) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 8271e9b70..99f6770b9 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -97,9 +97,9 @@ func (sw *StreamWatcher) stopping() bool { // receive reads result from the decoder in a loop and sends down the result channel. func (sw *StreamWatcher) receive() { + defer utilruntime.HandleCrash() defer close(sw.result) defer sw.Stop() - defer utilruntime.HandleCrash() for { action, obj, err := sw.source.Decode() if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index 1f4911a31..fd0550e4a 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -276,7 +276,7 @@ func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { } } -// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe. +// ProxyWatcher lets you wrap your channel in watch Interface. threadsafe. type ProxyWatcher struct { result chan Event stopCh chan struct{} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 6c8e87e23..57404e0b2 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -501,7 +501,7 @@ func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { } -// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. +// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. 
func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} } diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 064c24d12..f0d54b6a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -24,6 +24,7 @@ import ( discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" @@ -48,8 +49,10 @@ import ( eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" @@ -59,7 +62,6 @@ import ( schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" @@ -71,6 +73,7 @@ type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface + InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface @@ -95,8 +98,10 @@ type Interface interface { EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface + FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1() nodev1.NodeV1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface NodeV1beta1() nodev1beta1.NodeV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface @@ -106,7 +111,6 @@ type Interface interface { SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface - SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface 
StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface @@ -118,6 +122,7 @@ type Clientset struct { *discovery.DiscoveryClient admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client appsV1 *appsv1.AppsV1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client @@ -142,8 +147,10 @@ type Clientset struct { eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client networkingV1 *networkingv1.NetworkingV1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1 *nodev1.NodeV1Client nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client nodeV1beta1 *nodev1beta1.NodeV1beta1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client @@ -153,7 +160,6 @@ type Clientset struct { schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client @@ -169,6 +175,11 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return c.admissionregistrationV1beta1 } +// InternalV1alpha1 retrieves the InternalV1alpha1Client +func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface { + return c.internalV1alpha1 +} + // AppsV1 retrieves the AppsV1Client func (c *Clientset) AppsV1() appsv1.AppsV1Interface { return c.appsV1 @@ -289,6 +300,11 @@ func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha return c.flowcontrolV1alpha1 } +// FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client +func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface { + return c.flowcontrolV1beta1 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 @@ -299,6 +315,11 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return c.networkingV1beta1 } +// NodeV1 retrieves the NodeV1Client +func (c *Clientset) NodeV1() nodev1.NodeV1Interface { + return c.nodeV1 +} + // NodeV1alpha1 retrieves the NodeV1alpha1Client func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { return c.nodeV1alpha1 @@ -344,11 +365,6 @@ func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { return c.schedulingV1 } -// SettingsV1alpha1 retrieves the SettingsV1alpha1Client -func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -393,6 +409,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.internalV1alpha1, err = internalv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -489,6 +509,10 @@ func NewForConfig(c 
*rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -497,6 +521,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.nodeV1, err = nodev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -533,10 +561,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -563,6 +587,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.internalV1alpha1 = internalv1alpha1.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) @@ -587,8 +612,10 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) + cs.flowcontrolV1beta1 = flowcontrolv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1 = nodev1.NewForConfigOrDie(c) cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) @@ -598,7 +625,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) - cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) @@ -612,6 +638,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.internalV1alpha1 = internalv1alpha1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) @@ -636,8 +663,10 @@ func New(c rest.Interface) *Clientset { cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) + cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1 = nodev1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) @@ -647,7 +676,6 @@ func New(c rest.Interface) *Clientset { cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) - cs.settingsV1alpha1 = 
settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) cs.storageV1alpha1 = storagev1alpha1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 2710bf2de..5601e20dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -21,6 +21,7 @@ package scheme import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -45,8 +46,10 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -56,7 +59,6 @@ import ( schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -73,6 +75,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, @@ -97,8 +100,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, + flowcontrolv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1.AddToScheme, nodev1alpha1.AddToScheme, nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, @@ -108,7 +113,6 @@ var localSchemeBuilder = runtime.SchemeBuilder{ schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, - settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go index 8d3a8d8e1..e43a9a368 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go @@ -19,27 +19,27 @@ limitations under the License. 
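
With the regenerated Clientset and scheme registration above, the new API groups become reachable through ordinary getters, while SettingsV1alpha1() disappears along with the settings.k8s.io group. A minimal sketch, assuming a kubeconfig on disk (the path is illustrative, not taken from this patch), of building a Clientset against this client-go version and touching the new group clients:

    package main

    import (
        "fmt"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Illustrative path; any *rest.Config (in-cluster or from flags) works the same way.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // The new group clients are plain getters on the Clientset;
        // cs.SettingsV1alpha1() no longer exists after this bump.
        fmt.Printf("%T %T %T\n", cs.InternalV1alpha1(), cs.FlowcontrolV1beta1(), cs.NodeV1())
    }

Callers that still reference SettingsV1alpha1() or PodPresets will fail to compile until that usage is removed.
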
package v1alpha1 import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type SettingsV1alpha1Interface interface { +type InternalV1alpha1Interface interface { RESTClient() rest.Interface - PodPresetsGetter + StorageVersionsGetter } -// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. -type SettingsV1alpha1Client struct { +// InternalV1alpha1Client is used to interact with features provided by the internal.apiserver.k8s.io group. +type InternalV1alpha1Client struct { restClient rest.Interface } -func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { - return newPodPresets(c, namespace) +func (c *InternalV1alpha1Client) StorageVersions() StorageVersionInterface { + return newStorageVersions(c) } -// NewForConfig creates a new SettingsV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { +// NewForConfig creates a new InternalV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*InternalV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +48,12 @@ func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { if err != nil { return nil, err } - return &SettingsV1alpha1Client{client}, nil + return &InternalV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// NewForConfigOrDie creates a new InternalV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *InternalV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,9 +61,9 @@ func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { return client } -// New creates a new SettingsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *SettingsV1alpha1Client { - return &SettingsV1alpha1Client{c} +// New creates a new InternalV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *InternalV1alpha1Client { + return &InternalV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -81,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { +func (c *InternalV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go similarity index 93% rename from vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go index 23d9f94d5..f2835e607 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type PodPresetExpansion interface{} +type StorageVersionExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go new file mode 100644 index 000000000..af5466b04 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StorageVersionsGetter has a method to return a StorageVersionInterface. +// A group's client should implement this interface. +type StorageVersionsGetter interface { + StorageVersions() StorageVersionInterface +} + +// StorageVersionInterface has methods to work with StorageVersion resources. 
+type StorageVersionInterface interface { + Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) + Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersion, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) + StorageVersionExpansion +} + +// storageVersions implements StorageVersionInterface +type storageVersions struct { + client rest.Interface +} + +// newStorageVersions returns a StorageVersions +func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { + return &storageVersions{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. +func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Get(). + Resource("storageversions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. +func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageVersionList{} + err = c.client.Get(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageVersions. +func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. +func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Post(). + Resource("storageversions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. 
+func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Put(). + Resource("storageversions"). + Name(storageVersion.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Put(). + Resource("storageversions"). + Name(storageVersion.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersion). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. +func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageversions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageversions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageVersion. +func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + result = &v1alpha1.StorageVersion{} + err = c.client.Patch(pt). + Resource("storageversions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
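
The StorageVersion client generated above is cluster-scoped (none of its request paths include a Namespace() call, unlike the namespaced PodPreset client it displaces in the Clientset). A hedged sketch of listing storage versions through the new InternalV1alpha1 client; it assumes the target cluster actually serves the internal.apiserver.k8s.io/v1alpha1 group, which at this client-go version is an alpha API behind a feature gate:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // listStorageVersions prints the cluster-scoped StorageVersion objects, which
    // record the versions API servers use to persist each resource.
    func listStorageVersions(ctx context.Context, cs kubernetes.Interface) error {
        svs, err := cs.InternalV1alpha1().StorageVersions().List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, sv := range svs.Items {
            fmt.Println(sv.Name)
        }
        return nil
    }
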
+package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go new file mode 100644 index 000000000..9a8ba560e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta1Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta1Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta1Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1beta1Client { + return &FlowcontrolV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FlowcontrolV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go new file mode 100644 index 000000000..398f4f347 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. 
+func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + result = &v1beta1.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go new file mode 100644 index 000000000..fc15f6935 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go new file mode 100644 index 000000000..88633c827 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/api/flowcontrol/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
+type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
+func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + result = &v1beta1.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
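
The flowcontrol v1beta1 clients above mirror the existing v1alpha1 ones; FlowSchemas and PriorityLevelConfigurations are both cluster-scoped. A small sketch, assuming API Priority and Fairness is enabled on the target cluster, that lists FlowSchemas and prints the priority level each one routes requests to:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // dumpFlowSchemas lists FlowSchemas together with the
    // PriorityLevelConfiguration each schema references.
    func dumpFlowSchemas(ctx context.Context, cs kubernetes.Interface) error {
        fss, err := cs.FlowcontrolV1beta1().FlowSchemas().List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, fs := range fss.Items {
            fmt.Printf("%s -> %s\n", fs.Name, fs.Spec.PriorityLevelConfiguration.Name)
        }
        return nil
    }
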
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go new file mode 100644 index 000000000..e2c25926f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go new file mode 100644 index 000000000..7f0da811b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/node/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1Client struct { + restClient rest.Interface +} + +func (c *NodeV1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *NodeV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1Client { + return &NodeV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go new file mode 100644 index 000000000..df8c1cafe --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -0,0 +1,168 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "k8s.io/api/node/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. 
+type RuntimeClassInterface interface { + Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (*v1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (*v1.RuntimeClass, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RuntimeClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RuntimeClassList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(runtimeClass). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). 
+ Name(runtimeClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(runtimeClass). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { + result = &v1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go deleted file mode 100644 index aa1cb364e..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "k8s.io/api/settings/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodPresetsGetter has a method to return a PodPresetInterface. -// A group's client should implement this interface. -type PodPresetsGetter interface { - PodPresets(namespace string) PodPresetInterface -} - -// PodPresetInterface has methods to work with PodPreset resources. 
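
The node/v1 client above brings RuntimeClasses to the GA group. A sketch of registering a RuntimeClass through the new NodeV1() client; the "gvisor" and "runsc" names are illustrative assumptions and must match a runtime handler actually configured in the nodes' container runtime:

    package main

    import (
        "context"

        nodev1 "k8s.io/api/node/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createRuntimeClass registers a RuntimeClass whose handler name must match a
    // handler configured in the CRI implementation on the nodes.
    func createRuntimeClass(ctx context.Context, cs kubernetes.Interface) (*nodev1.RuntimeClass, error) {
        rc := &nodev1.RuntimeClass{
            ObjectMeta: metav1.ObjectMeta{Name: "gvisor"}, // illustrative name
            Handler:    "runsc",                           // illustrative handler
        }
        return cs.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{})
    }
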
-type PodPresetInterface interface { - Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (*v1alpha1.PodPreset, error) - Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (*v1alpha1.PodPreset, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodPreset, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodPresetList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) - PodPresetExpansion -} - -// podPresets implements PodPresetInterface -type podPresets struct { - client rest.Interface - ns string -} - -// newPodPresets returns a PodPresets -func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { - return &podPresets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *podPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodPresetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podPreset). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("podpresets"). - Name(podPreset.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podPreset). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *podPresets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podPresets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podPreset. -func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go index 6fb53cecf..c10899792 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -18,11 +18,12 @@ package clientauthentication import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta @@ -37,7 +38,7 @@ type ExecCredential struct { Status *ExecCredentialStatus } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Response is populated when the transport encounters HTTP status codes, such as 401, @@ -49,6 +50,13 @@ type ExecCredentialSpec struct { // interactive prompt. // +optional Interactive bool + + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). + // +optional + Cluster *Cluster } // ExecCredentialStatus holds credentials for the transport to use. @@ -58,13 +66,13 @@ type ExecCredentialStatus struct { ExpirationTimestamp *metav1.Time // Token is a bearer token used by the client for request authentication. // +optional - Token string + Token string `datapolicy:"token"` // PEM-encoded client TLS certificate. // +optional ClientCertificateData string // PEM-encoded client TLS private key. 
// +optional - ClientKeyData string + ClientKeyData string `datapolicy:"secret-key"` } // Response defines metadata about a failed request, including HTTP status code and @@ -75,3 +83,56 @@ type Response struct { // Code is the HTTP status code returned by the server. Code int32 } + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. + // +listType=atomic + // +optional + CertificateAuthorityData []byte + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. + // +optional + Config runtime.Object +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go new file mode 100644 index 000000000..572e049f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
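
The Cluster field added above is only populated for exec credential plugins that opt in via provideClusterInfo. A hedged sketch of how such a plugin might read it, assuming the usual exec protocol in which client-go serializes the ExecCredential into the KUBERNETES_EXEC_INFO environment variable, and assuming the plugin consumes the versioned v1beta1 types (which carry the Cluster field) rather than the internal struct shown in this hunk:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"

        clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
    )

    func main() {
        // client-go hands the plugin its input on KUBERNETES_EXEC_INFO.
        raw := os.Getenv("KUBERNETES_EXEC_INFO")
        if raw == "" {
            fmt.Fprintln(os.Stderr, "KUBERNETES_EXEC_INFO not set")
            os.Exit(1)
        }
        var ec clientauthv1beta1.ExecCredential
        if err := json.Unmarshal([]byte(raw), &ec); err != nil {
            fmt.Fprintln(os.Stderr, "decoding exec credential:", err)
            os.Exit(1)
        }
        // Cluster is nil unless provideClusterInfo is set in the kubeconfig exec block.
        if ec.Spec.Cluster != nil {
            fmt.Fprintln(os.Stderr, "authenticating to", ec.Spec.Cluster.Server)
        }
    }
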
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // This conversion intentionally omits the Cluster field which is only supported in newer versions. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index c714e2457..1ff13c438 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -37,7 +37,7 @@ type ExecCredential struct { Status *ExecCredentialStatus `json:"status,omitempty"` } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { // Response is populated when the transport encounters HTTP status codes, such as 401, @@ -61,11 +61,11 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. 
- ClientKeyData string `json:"clientKeyData,omitempty"` + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` } // Response defines metadata about a failed request, including HTTP status code and diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go index 461c20b29..b0e503af4 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -51,11 +51,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { @@ -76,6 +71,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } return nil } @@ -119,14 +119,10 @@ func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialS func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { out.Response = (*Response)(unsafe.Pointer(in.Response)) out.Interactive = in.Interactive + // WARNING: in.Cluster requires manual conversion: does not exist in peer-type return nil } -// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function. 
-func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) -} - func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go index f543806ac..441b7c44b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go @@ -17,10 +17,12 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" - clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/client-go/pkg/apis/clientauthentication" ) func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - return nil + // This conversion intentionally omits the Response and Interactive fields, which were only + // supported in v1alpha1. + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go index d6e267452..fabc6f65e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -18,17 +18,17 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` - // Spec holds information passed to the plugin by the transport. This contains - // request and runtime specific information, such as if the session is interactive. + // Spec holds information passed to the plugin by the transport. Spec ExecCredentialSpec `json:"spec,omitempty"` // Status is filled in by the plugin and holds the credentials that the transport @@ -37,9 +37,16 @@ type ExecCredential struct { Status *ExecCredentialStatus `json:"status,omitempty"` } -// ExecCredenitalSpec holds request and runtime specific information provided by +// ExecCredentialSpec holds request and runtime specific information provided by // the transport. -type ExecCredentialSpec struct{} +type ExecCredentialSpec struct { + // Cluster contains information to allow an exec plugin to communicate with the + // kubernetes cluster being authenticated to. Note that Cluster is non-nil only + // when provideClusterInfo is set to true in the exec provider config (i.e., + // ExecConfig.ProvideClusterInfo). 
+ // +optional + Cluster *Cluster `json:"cluster,omitempty"` +} // ExecCredentialStatus holds credentials for the transport to use. // @@ -51,9 +58,62 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // PEM-encoded client TLS certificates (including intermediates, if any). ClientCertificateData string `json:"clientCertificateData,omitempty"` // PEM-encoded private key for the above certificate. - ClientKeyData string `json:"clientKeyData,omitempty"` + ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` +} + +// Cluster contains information to allow an exec plugin to communicate +// with the kubernetes cluster being authenticated to. +// +// To ensure that this struct contains everything someone would need to communicate +// with a kubernetes cluster (just like they would via a kubeconfig), the fields +// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception +// of CertificateAuthority, since CA data will always be passed to the plugin as bytes. +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // TLSServerName is passed to the server for SNI and is used in the client to + // check server certificates against. If ServerName is empty, the hostname + // used to contact the server is used. + // +optional + TLSServerName string `json:"tls-server-name,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. + // This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CAData contains PEM-encoded certificate authority certificates. + // If empty, system roots should be used. + // +listType=atomic + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // ProxyURL is the URL to the proxy to be used for all requests to this + // cluster. + // +optional + ProxyURL string `json:"proxy-url,omitempty"` + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's + // extensions[client.authentication.k8s.io/exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. 
+ // +optional + Config runtime.RawExtension `json:"config,omitempty"` } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 0e533e465..90f7935fe 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -36,6 +36,16 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_Cluster_To_v1beta1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) }); err != nil { @@ -69,6 +79,40 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function. +func Convert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error { + return autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in, out, s) +} + +func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + out.Server = in.Server + out.TLSServerName = in.TLSServerName + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) + out.ProxyURL = in.ProxyURL + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { + return err + } + return nil +} + +// Convert_clientauthentication_Cluster_To_v1beta1_Cluster is an autogenerated conversion function. 
+func Convert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in, out, s) +} + func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -96,6 +140,15 @@ func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *c } func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(clientauthentication.Cluster) + if err := Convert_v1beta1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } return nil } @@ -107,6 +160,15 @@ func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSp func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { // WARNING: in.Response requires manual conversion: does not exist in peer-type // WARNING: in.Interactive requires manual conversion: does not exist in peer-type + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + if err := Convert_clientauthentication_Cluster_To_v1beta1_Cluster(*in, *out, s); err != nil { + return err + } + } else { + out.Cluster = nil + } return nil } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go index 736b8cf00..3a72ece0c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -24,11 +24,33 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in out.TypeMeta = in.TypeMeta - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) if in.Status != nil { in, out := &in.Status, &out.Status *out = new(ExecCredentialStatus) @@ -58,6 +80,11 @@ func (in *ExecCredential) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go index c568a6fc8..045b07f5b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -24,6 +24,30 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { *out = *in @@ -63,6 +87,11 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = new(Response) (*in).DeepCopyInto(*out) } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 627bb2de9..af21c4995 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -87,8 +87,15 @@ func newCache() *cache { var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} -func cacheKey(c *api.ExecConfig) string { - return spewConfig.Sprint(c) +func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { + key := struct { + conf *api.ExecConfig + cluster *clientauthentication.Cluster + }{ + conf: conf, + cluster: cluster, + } + return spewConfig.Sprint(key) } type cache struct { @@ -155,12 +162,12 @@ func (s *sometimes) Do(f func()) { } // GetAuthenticator returns an exec-based plugin for providing client credentials. 
-func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) { - return newAuthenticator(globalCache, config) +func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { + return newAuthenticator(globalCache, config, cluster) } -func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) { - key := cacheKey(config) +func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) { + key := cacheKey(config, cluster) if a, ok := c.get(key); ok { return a, nil } @@ -171,9 +178,11 @@ func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) } a := &Authenticator{ - cmd: config.Command, - args: config.Args, - group: gv, + cmd: config.Command, + args: config.Args, + group: gv, + cluster: cluster, + provideClusterInfo: config.ProvideClusterInfo, installHint: config.InstallHint, sometimes: &sometimes{ @@ -200,10 +209,12 @@ func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) // The plugin input and output are defined by the API group client.authentication.k8s.io. type Authenticator struct { // Set by the config - cmd string - args []string - group schema.GroupVersion - env []string + cmd string + args []string + group schema.GroupVersion + env []string + cluster *clientauthentication.Cluster + provideClusterInfo bool // Used to avoid log spew by rate limiting install hint printing. We didn't do // this by interval based rate limiting alone since that way may have prevented @@ -230,8 +241,8 @@ type Authenticator struct { } type credentials struct { - token string - cert *tls.Certificate + token string `datapolicy:"token"` + cert *tls.Certificate `datapolicy:"secret-key"` } // UpdateTransportConfig updates the transport.Config to use credentials @@ -367,19 +378,16 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err Interactive: a.interactive, }, } + if a.provideClusterInfo { + cred.Spec.Cluster = a.cluster + } env := append(a.environ(), a.env...) - if a.group == v1alpha1.SchemeGroupVersion { - // Input spec disabled for beta due to lack of use. Possibly re-enable this later if - // someone wants it back. - // - // See: https://github.com/kubernetes/kubernetes/issues/61796 - data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) - if err != nil { - return fmt.Errorf("encode ExecCredentials: %v", err) - } - env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) + data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) + if err != nil { + return fmt.Errorf("encode ExecCredentials: %v", err) } + env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) stdout := &bytes.Buffer{} cmd := exec.Command(a.cmd, a.args...) diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 6e50eef51..3735750bb 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -65,12 +65,12 @@ type Config struct { // Server requires Basic authentication Username string - Password string + Password string `datapolicy:"password"` // Server requires Bearer authentication. This client will not attempt to use // refresh tokens for an OAuth2 flow. // TODO: demonstrate an OAuth2 compatible client. - BearerToken string + BearerToken string `datapolicy:"token"` // Path to a file containing a BearerToken. // If set, the contents are periodically read. 
@@ -125,6 +125,7 @@ type Config struct { // WarningHandler handles warnings in server responses. // If not set, the default warning handler is used. + // See documentation for SetDefaultWarningHandler() for details. WarningHandler WarningHandler // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. @@ -133,7 +134,7 @@ type Config struct { // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) - // Proxy is the the proxy func to be used for all requests made by this + // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy // returns a nil *URL, no proxy is used. // @@ -159,6 +160,15 @@ func (sanitizedAuthConfigPersister) String() string { return "rest.AuthProviderConfigPersister(--- REDACTED ---)" } +type sanitizedObject struct{ runtime.Object } + +func (sanitizedObject) GoString() string { + return "runtime.Object(--- REDACTED ---)" +} +func (sanitizedObject) String() string { + return "runtime.Object(--- REDACTED ---)" +} + // GoString implements fmt.GoStringer and sanitizes sensitive fields of Config // to prevent accidental leaking via logs. func (c *Config) GoString() string { @@ -182,7 +192,9 @@ func (c *Config) String() string { if cc.AuthConfigPersister != nil { cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} } - + if cc.ExecProvider != nil && cc.ExecProvider.Config != nil { + cc.ExecProvider.Config = sanitizedObject{Object: cc.ExecProvider.Config} + } return fmt.Sprintf("%#v", cc) } @@ -203,7 +215,7 @@ type TLSClientConfig struct { // Server should be accessed without verifying the TLS certificate. For testing only. Insecure bool // ServerName is passed to the server for SNI and is used in the client to check server - // ceritificates against. If ServerName is empty, the hostname used to contact the + // certificates against. If ServerName is empty, the hostname used to contact the // server is used. ServerName string @@ -219,7 +231,7 @@ type TLSClientConfig struct { CertData []byte // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). // KeyData takes precedence over KeyFile - KeyData []byte + KeyData []byte `datapolicy:"security-key"` // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte @@ -587,7 +599,7 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { - return &Config{ + c := &Config{ Host: config.Host, APIPath: config.APIPath, ContentConfig: config.ContentConfig, @@ -626,4 +638,8 @@ func CopyConfig(config *Config) *Config { Dial: config.Dial, Proxy: config.Proxy, } + if config.ExecProvider != nil && config.ExecProvider.Config != nil { + c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject() + } + return c } diff --git a/vendor/k8s.io/client-go/rest/exec.go b/vendor/k8s.io/client-go/rest/exec.go new file mode 100644 index 000000000..5f3b43c55 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/exec.go @@ -0,0 +1,85 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/http" + "net/url" + + "k8s.io/client-go/pkg/apis/clientauthentication" + clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication" +) + +// This file contains Config logic related to exec credential plugins. + +// ConfigToExecCluster creates a clientauthenticationapi.Cluster with the corresponding fields from +// the provided Config. +func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, error) { + caData, err := dataFromSliceOrFile(config.CAData, config.CAFile) + if err != nil { + return nil, fmt.Errorf("failed to load CA bundle for execProvider: %v", err) + } + + var proxyURL string + if config.Proxy != nil { + req, err := http.NewRequest("", config.Host, nil) + if err != nil { + return nil, fmt.Errorf("failed to create proxy URL request for execProvider: %w", err) + } + url, err := config.Proxy(req) + if err != nil { + return nil, fmt.Errorf("failed to get proxy URL for execProvider: %w", err) + } + if url != nil { + proxyURL = url.String() + } + } + + return &clientauthentication.Cluster{ + Server: config.Host, + TLSServerName: config.ServerName, + InsecureSkipTLSVerify: config.Insecure, + CertificateAuthorityData: caData, + ProxyURL: proxyURL, + Config: config.ExecProvider.Config, + }, nil +} + +// ExecClusterToConfig creates a Config with the corresponding fields from the provided +// clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have +// any authentication-related fields set). 
+func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) { + var proxy func(*http.Request) (*url.URL, error) + if cluster.ProxyURL != "" { + proxyURL, err := url.Parse(cluster.ProxyURL) + if err != nil { + return nil, fmt.Errorf("cannot parse proxy URL: %w", err) + } + proxy = http.ProxyURL(proxyURL) + } + + return &Config{ + Host: cluster.Server, + TLSClientConfig: TLSClientConfig{ + Insecure: cluster.InsecureSkipTLSVerify, + ServerName: cluster.TLSServerName, + CAData: cluster.CertificateAuthorityData, + }, + Proxy: proxy, + }, nil +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0ed7def73..1ccc0dafe 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -511,13 +511,23 @@ func (r Request) finalURLTemplate() url.URL { } r.params = newParams url := r.URL() - segments := strings.Split(r.URL().Path, "/") + + segments := strings.Split(url.Path, "/") groupIndex := 0 index := 0 - if r.URL() != nil && r.c.base != nil && strings.Contains(r.URL().Path, r.c.base.Path) { - groupIndex += len(strings.Split(r.c.base.Path, "/")) + trimmedBasePath := "" + if url != nil && r.c.base != nil && strings.Contains(url.Path, r.c.base.Path) { + p := strings.TrimPrefix(url.Path, r.c.base.Path) + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + // store the base path that we have trimmed so we can append it + // before returning the URL + trimmedBasePath = r.c.base.Path + segments = strings.Split(p, "/") + groupIndex = 1 } - if groupIndex >= len(segments) { + if len(segments) <= 2 { return *url } @@ -563,7 +573,7 @@ func (r Request) finalURLTemplate() url.URL { segments[index+3] = "{name}" } } - url.Path = path.Join(segments...) + url.Path = path.Join(trimmedBasePath, path.Join(segments...)) return *url } @@ -638,7 +648,7 @@ func (b *throttledLogger) attemptToLog() (klog.Level, bool) { return -1, false } -// Infof will write a log message at each logLevel specified by the reciever's throttleSettings +// Infof will write a log message at each logLevel specified by the receiver's throttleSettings // as long as it hasn't written a log message more recently than minLogInterval. 
func (b *throttledLogger) Infof(message string, args ...interface{}) { if logLevel, ok := b.attemptToLog(); ok { diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 450edc6ed..87792750a 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -21,6 +21,7 @@ import ( "errors" "net/http" + "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/plugin/pkg/client/auth/exec" "k8s.io/client-go/transport" ) @@ -94,7 +95,15 @@ func (c *Config) TransportConfig() (*transport.Config, error) { } if c.ExecProvider != nil { - provider, err := exec.GetAuthenticator(c.ExecProvider) + var cluster *clientauthentication.Cluster + if c.ExecProvider.ProvideClusterInfo { + var err error + cluster, err = ConfigToExecCluster(c) + if err != nil { + return nil, err + } + } + provider, err := exec.GetAuthenticator(c.ExecProvider, cluster) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/rest/warnings.go b/vendor/k8s.io/client-go/rest/warnings.go index 45c1c3b2c..18476f5ff 100644 --- a/vendor/k8s.io/client-go/rest/warnings.go +++ b/vendor/k8s.io/client-go/rest/warnings.go @@ -38,8 +38,11 @@ var ( defaultWarningHandlerLock sync.RWMutex ) -// SetDefaultWarningHandler sets the default handler client uses when warning headers are encountered. -// By default, warnings are printed to stderr. +// SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered. +// By default, warnings are logged. Several built-in implementations are provided: +// - NoWarnings suppresses warnings. +// - WarningLogger logs warnings. +// - NewWarningWriter() outputs warnings to the provided writer. func SetDefaultWarningHandler(l WarningHandler) { defaultWarningHandlerLock.Lock() defer defaultWarningHandlerLock.Unlock() diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index c34172677..4c24f7997 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -75,11 +75,11 @@ import ( // to be read/written from a file as a JSON object. type Info struct { User string - Password string + Password string `datapolicy:"password"` CAFile string CertFile string KeyFile string - BearerToken string + BearerToken string `datapolicy:"token"` Insecure *bool } diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 7bbe63542..9d0a18771 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -38,6 +38,5 @@ reviewers: - resouer - jessfraz - mfojtik -- mqliang - sdminonne - ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 3ad9b53bb..684d1a8d3 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -72,6 +72,9 @@ type Config struct { // Called whenever the ListAndWatch drops the connection with an error. WatchErrorHandler WatchErrorHandler + + // WatchListPageSize is the requested chunk size of initial and relist watch lists. 
+ WatchListPageSize int64 } // ShouldResyncFunc is a type of function that indicates if a reflector should perform a @@ -134,6 +137,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.config.FullResyncPeriod, ) r.ShouldResync = c.config.ShouldResync + r.WatchListPageSize = c.config.WatchListPageSize r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 2774f4f21..148b478d5 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -145,7 +145,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { // DeltaFIFO's Pop(), Get(), and GetByKey() methods return // interface{} to satisfy the Store/Queue interfaces, but they // will always return an object of type Deltas. List() returns -// the newest objects currently in the FIFO. +// the newest object from each accumulator in the FIFO. // // A DeltaFIFO's knownObjects KeyListerGetter provides the abilities // to list Store keys and to get objects by Store key. The objects in @@ -161,12 +161,13 @@ type DeltaFIFO struct { lock sync.RWMutex cond sync.Cond - // `items` maps keys to Deltas. - // `queue` maintains FIFO order of keys for consumption in Pop(). - // We maintain the property that keys in the `items` and `queue` are - // strictly 1:1 mapping, and that all Deltas in `items` should have - // at least one Delta. + // `items` maps a key to a Deltas. + // Each such Deltas has at least one Delta. items map[string]Deltas + + // `queue` maintains FIFO order of keys for consumption in Pop(). + // There are no duplicates in `queue`. + // A key is in `queue` if and only if it is in `items`. queue []string // populated is true if the first batch of items inserted by Replace() has been populated @@ -376,8 +377,8 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err if err != nil { return KeyError{obj, err} } - - newDeltas := append(f.items[id], Delta{actionType, obj}) + oldDeltas := f.items[id] + newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) if len(newDeltas) > 0 { @@ -389,10 +390,14 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err } else { // This never happens, because dedupDeltas never returns an empty list // when given a non-empty list (as it is here). - // But if somehow it ever does return an empty list, then - // We need to remove this from our map (extra items in the queue are - // ignored if they are not in the map). - delete(f.items, id) + // If somehow it happens anyway, deal with it but complain. + if oldDeltas == nil { + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) + return nil + } + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) + f.items[id] = newDeltas + return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) } return nil } @@ -459,7 +464,7 @@ func (f *DeltaFIFO) IsClosed() bool { return f.closed } -// Pop blocks until an item is added to the queue, and then returns it. If +// Pop blocks until the queue has some items, and then returns one. 
If // multiple items are ready, they are returned in the order in which they were // added/updated. The item is removed from the queue (and the store) before it // is returned, so if you don't successfully process it, you need to add it back @@ -494,7 +499,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } item, ok := f.items[id] if !ok { - // Item may have been deleted subsequently. + // This should never happen + klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id) continue } delete(f.items, id) diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index e995abe25..360d7304b 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -69,6 +69,8 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager + // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch. + initConnBackoffManager wait.BackoffManager resyncPeriod time.Duration // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked @@ -99,6 +101,15 @@ type Reflector struct { watchErrorHandler WatchErrorHandler } +// ResourceVersionUpdater is an interface that allows store implementation to +// track the current resource version of the reflector. This is especially +// important if storage bookmarks are enabled. +type ResourceVersionUpdater interface { + // UpdateResourceVersion is called each time current resource version of the reflector + // is updated. + UpdateResourceVersion(resourceVersion string) +} + // The WatchErrorHandler is called whenever ListAndWatch drops the // connection with an error. After calling this handler, the informer // will backoff and retry. @@ -166,10 +177,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, - watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + resyncPeriod: resyncPeriod, + clock: realClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), } r.setExpectedType(expectedType) return r @@ -404,9 +416,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // If this is "connection refused" error, it means that most likely apiserver is not responsive. // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. - // If that's the case wait and resend watch request. + // If that's the case begin exponentially backing off and resend watch request. 
if utilnet.IsConnectionRefused(err) { - time.Sleep(time.Second) + <-r.initConnBackoffManager.Backoff().C() continue } return err @@ -504,6 +516,9 @@ loop: } *resourceVersion = newResourceVersion r.setLastSyncResourceVersion(newResourceVersion) + if rvu, ok := r.store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(newResourceVersion) + } eventCount++ } } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index f48989763..3a3f538a1 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -485,13 +485,13 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 829424dcf..24f469236 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -114,10 +114,10 @@ type AuthInfo struct { ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` @@ -135,7 +135,7 @@ type AuthInfo struct { Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. // +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` @@ -215,6 +215,36 @@ type ExecConfig struct { // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. 
InstallHint string `json:"installHint,omitempty"` + + // ProvideClusterInfo determines whether or not to provide cluster information, + // which could potentially contain very large CA data, to this exec plugin as a + // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set + // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for + // reading this environment variable. + ProvideClusterInfo bool `json:"provideClusterInfo"` + + // Config holds additional config data that is specific to the exec + // plugin with regards to the cluster being authenticated to. + // + // This data is sourced from the clientcmd Cluster object's extensions[exec] field: + // + // clusters: + // - name: my-cluster + // cluster: + // ... + // extensions: + // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config + // extension: + // audience: 06e3fbd18de8 # arbitrary config + // + // In some environments, the user config may be exactly the same across many clusters + // (i.e. call this exec plugin) minus some details that are specific to each cluster + // such as the audience. This field allows the per cluster config to be directly + // specified with the cluster info. Using this field to store secret data is not + // recommended as one of the prime benefits of exec plugins is that no secrets need + // to be stored directly in the kubeconfig. + // +k8s:conversion-gen=false + Config runtime.Object } var _ fmt.Stringer = new(ExecConfig) @@ -237,7 +267,11 @@ func (c ExecConfig) String() string { if len(c.Env) > 0 { env = "[]ExecEnvVar{--- REDACTED ---}" } - return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) + config := "runtime.Object(nil)" + if c.Config != nil { + config = "runtime.Object(--- REDACTED ---)" + } + return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config) } // ExecEnvVar is used for setting environment variables when executing an exec-based diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 0395f860f..8c29b39c1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -104,10 +104,10 @@ type AuthInfo struct { ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey // +optional - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` // Token is the bearer token for authentication to the kubernetes cluster. // +optional - Token string `json:"token,omitempty"` + Token string `json:"token,omitempty" datapolicy:"token"` // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` @@ -125,7 +125,7 @@ type AuthInfo struct { Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. 
// +optional - Password string `json:"password,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` @@ -214,6 +214,13 @@ type ExecConfig struct { // present. For example, `brew install foo-cli` might be a good InstallHint for // foo-cli on Mac OS systems. InstallHint string `json:"installHint,omitempty"` + + // ProvideClusterInfo determines whether or not to provide cluster information, + // which could potentially contain very large CA data, to this exec plugin as a + // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set + // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for + // reading this environment variable. + ProvideClusterInfo bool `json:"provideClusterInfo"` } // ExecEnvVar is used for setting environment variables when executing an exec-based diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index bf9eaeca3..26e96529d 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -171,7 +171,15 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s out.Username = in.Username out.Password = in.Password out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - out.Exec = (*api.ExecConfig)(unsafe.Pointer(in.Exec)) + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(api.ExecConfig) + if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -197,7 +205,15 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s out.Username = in.Username out.Password = in.Password out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - out.Exec = (*ExecConfig)(unsafe.Pointer(in.Exec)) + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecConfig) + if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -359,6 +375,7 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint + out.ProvideClusterInfo = in.ProvideClusterInfo return nil } @@ -373,6 +390,8 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env)) out.APIVersion = in.APIVersion out.InstallHint = in.InstallHint + out.ProvideClusterInfo = in.ProvideClusterInfo + // INFO: in.Config opted out of conversion generation return nil } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index 3240a7a98..a04de6260 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ 
b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -267,6 +267,9 @@ func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { *out = make([]ExecEnvVar, len(*in)) copy(*out, *in) } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } return } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 690afce0c..9e1cd64a0 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -34,6 +34,11 @@ import ( "github.com/imdario/mergo" ) +const ( + // clusterExtensionKey is reserved in the cluster extensions list for exec plugin config. + clusterExtensionKey = "client.authentication.k8s.io/exec" +) + var ( // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields // DEPRECATED will be replaced @@ -72,7 +77,7 @@ type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderC type promptedCredentials struct { username string - password string + password string `datapolicy:"password"` } // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information @@ -189,7 +194,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { authInfoName, _ := config.getAuthInfoName() persister = PersisterForUser(config.configAccess, authInfoName) } - userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) + userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo) if err != nil { return nil, err } @@ -232,7 +237,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file // 4. 
if there is not enough information to identify the user, prompt if possible -func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { +func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { mergedConfig := &restclient.Config{} // blindly overwrite existing values based on precedence @@ -271,6 +276,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if configAuthInfo.Exec != nil { mergedConfig.ExecProvider = configAuthInfo.Exec mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint) + mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey] } // if there still isn't enough information to authenticate the user, try prompting diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 5f1660bf9..a7eae66bf 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -83,10 +83,13 @@ func (o *PathOptions) GetEnvVarFiles() []string { } func (o *PathOptions) GetLoadingPrecedence() []string { + if o.IsExplicitFile() { + return []string{o.GetExplicitFile()} + } + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { return envVarFiles } - return []string{o.GlobalFile} } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index b0672291a..901ed50c4 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -304,6 +304,10 @@ func (rules *ClientConfigLoadingRules) Migrate() error { // GetLoadingPrecedence implements ConfigAccess func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + if len(rules.ExplicitPath) > 0 { + return []string{rules.ExplicitPath} + } + return rules.Precedence } diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index fa2afb1f1..5fe768ed5 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -44,7 +44,7 @@ type tlsCacheKey struct { insecure bool caData string certData string - keyData string + keyData string `datapolicy:"security-key"` certFile string keyFile string serverName string diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 45db24864..070474831 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -35,10 +35,10 @@ type Config struct { // Username and password for basic authentication Username string - Password string + Password string `datapolicy:"password"` // Bearer token for authentication - BearerToken string + BearerToken string `datapolicy:"token"` // Path to a file containing a BearerToken. // If set, the contents are periodically read. @@ -70,7 +70,7 @@ type Config struct { // Dial specifies the dial function for creating unencrypted TCP connections. 
Dial func(ctx context.Context, network, address string) (net.Conn, error) - // Proxy is the the proxy func to be used for all requests made by this + // Proxy is the proxy func to be used for all requests made by this // transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy // returns a nil *URL, no proxy is used. // @@ -108,7 +108,7 @@ func (c *Config) HasCertAuth() bool { return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0) } -// HasCertCallbacks returns whether the configuration has certificate callback or not. +// HasCertCallback returns whether the configuration has certificate callback or not. func (c *Config) HasCertCallback() bool { return c.TLS.GetCert != nil } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index a05208d92..056bc023c 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -146,6 +146,7 @@ type userAgentRoundTripper struct { rt http.RoundTripper } +// NewUserAgentRoundTripper will add User-Agent header to a request unless it has already been set. func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { return &userAgentRoundTripper{agent, rt} } @@ -167,7 +168,7 @@ func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { retur type basicAuthRoundTripper struct { username string - password string + password string `datapolicy:"password"` rt http.RoundTripper } @@ -260,7 +261,7 @@ func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTr return &bearerAuthRoundTripper{bearer, nil, rt} } -// NewBearerAuthRoundTripper adds the provided bearer token to a request +// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. // If tokenFile is non-empty, it is periodically read, // and the last successfully read content is used as the bearer token. 
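The round-tripper hunks above all follow the same pattern: a decorator that only sets a header when the caller has not already done so (User-Agent, basic auth, bearer tokens). As a rough illustration of that pattern only — not client-go's actual implementation, and with the periodic token-file refresh described in the comment omitted — a minimal standalone sketch could look like the following; `staticBearerRT` and `newStaticBearerRT` are invented names:

```go
package main

import (
	"fmt"
	"net/http"
)

// staticBearerRT adds a fixed bearer token to outgoing requests unless the
// Authorization header has already been set, mirroring the behavior the
// client-go comments describe.
type staticBearerRT struct {
	token string
	rt    http.RoundTripper
}

func newStaticBearerRT(token string, rt http.RoundTripper) http.RoundTripper {
	return &staticBearerRT{token: token, rt: rt}
}

func (s *staticBearerRT) RoundTrip(req *http.Request) (*http.Response, error) {
	if len(req.Header.Get("Authorization")) != 0 {
		// Respect an explicitly set Authorization header.
		return s.rt.RoundTrip(req)
	}
	req = req.Clone(req.Context()) // never mutate the caller's request
	req.Header.Set("Authorization", "Bearer "+s.token)
	return s.rt.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: newStaticBearerRT("example-token", http.DefaultTransport)}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```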
@@ -305,7 +306,7 @@ func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { retu // requestInfo keeps track of information about a request/response combination type requestInfo struct { - RequestHeaders http.Header + RequestHeaders http.Header `datapolicy:"token"` RequestVerb string RequestURL string @@ -340,6 +341,7 @@ func (r *requestInfo) toCurl() string { headers := "" for key, values := range r.RequestHeaders { for _, value := range values { + value = maskValue(key, value) headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) } } diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod deleted file mode 100644 index 3877d8546..000000000 --- a/vendor/k8s.io/klog/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module k8s.io/klog - -go 1.12 - -require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum deleted file mode 100644 index fb64d277a..000000000 --- a/vendor/k8s.io/klog/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 2f9f6f0d8..64d29622e 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -27,7 +27,7 @@ Historical context is available here: How to use klog =============== -- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"` - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) @@ -35,6 +35,10 @@ How to use klog **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. +### Coexisting with klog/v2 + +See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2. + ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md new file mode 100644 index 000000000..2083d44cd --- /dev/null +++ b/vendor/k8s.io/klog/v2/SECURITY.md @@ -0,0 +1,22 @@ +# Security Policy + +## Security Announcements + +Join the [kubernetes-security-announce] group for security and vulnerability announcements. + +You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. + +## Reporting a Vulnerability + +Instructions for reporting a vulnerability can be found on the +[Kubernetes Security and Disclosure Information] page. + +## Supported Versions + +Information about supported Kubernetes versions can be found on the +[Kubernetes version and version skew support policy] page on the Kubernetes website. 
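The klog README bullets above (call `klog.InitFlags(nil)` explicitly, redirect output with `klog.SetOutput()` and an `io.Writer`) translate into roughly the following standalone program. This is a minimal sketch assuming `k8s.io/klog/v2` is on the module path; the flag values are illustrative, the buffer stands in for any writer (a syslog connection, a file, etc.), and `logtostderr` is disabled here on the assumption that stderr output would otherwise bypass the redirected writer:

```go
package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// Register klog's flags (-v, -logtostderr, -log_file, ...) on the default FlagSet.
	klog.InitFlags(nil)
	flag.Set("logtostderr", "false")
	flag.Set("alsologtostderr", "false")
	flag.Parse()

	// Redirect everything klog writes into an in-memory buffer.
	var buf bytes.Buffer
	klog.SetOutput(&buf)

	klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
	klog.Flush()

	fmt.Print(buf.String())
}
```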
+ +[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce +[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 +[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions +[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod deleted file mode 100644 index e396e31c0..000000000 --- a/vendor/k8s.io/klog/v2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module k8s.io/klog/v2 - -go 1.13 - -require github.com/go-logr/logr v0.2.0 diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum deleted file mode 100644 index 8dfa78542..000000000 --- a/vendor/k8s.io/klog/v2/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index ae2b86138..23cced625 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -413,6 +413,7 @@ func init() { logging.skipHeaders = false logging.addDirHeader = false logging.skipLogHeaders = false + logging.oneOutput = false go logging.flushDaemon() } @@ -432,6 +433,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -505,6 +507,12 @@ type loggingT struct { // If set, all output will be redirected unconditionally to the provided logr.Logger logr logr.Logger + + // If true, messages will not be propagated to lower severity log levels + oneOutput bool + + // If set, all output will be filtered through the filter. + filter LogFilter } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -687,7 +695,7 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { +func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -695,15 +703,18 @@ func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprintln(buf, args...) 
l.output(s, logr, buf, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, args ...interface{}) { - l.printDepth(s, logr, 1, args...) +func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logr, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -711,6 +722,9 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...i l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -718,7 +732,7 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...i l.output(s, logr, buf, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -726,6 +740,9 @@ func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...i l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + format, args = filter.FilterF(format, args) + } fmt.Fprintf(buf, format, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -736,7 +753,7 @@ func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...i // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -744,6 +761,9 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -752,18 +772,24 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } if loggr != nil { - loggr.Error(err, msg, keysAndValues) + loggr.Error(err, msg, keysAndValues...) return } l.printS(err, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. 
-func (l *loggingT) infoS(loggr logr.Logger, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } if loggr != nil { - loggr.Info(msg, keysAndValues) + loggr.Info(msg, keysAndValues...) return } l.printS(nil, msg, keysAndValues...) @@ -785,7 +811,7 @@ func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { } else { s = errorLog } - l.printDepth(s, logging.logr, 2, b) + l.printDepth(s, logging.logr, nil, 2, b) } const missingValue = "(MISSING)" @@ -919,18 +945,22 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, } } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) + if l.oneOutput { + l.file[s].Write(data) + } else { + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } } @@ -1077,11 +1107,19 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { } var err error sb.file, _, err = create(severityName[sb.sev], now, startup) - sb.nbytes = 0 if err != nil { return err } - + if startup { + fileInfo, err := sb.file.Stat() + if err != nil { + return fmt.Errorf("file stat could not get fileinfo: %v", err) + } + // init file size + sb.nbytes = uint64(fileInfo.Size()) + } else { + sb.nbytes = 0 + } sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) if sb.logger.skipLogHeaders { @@ -1197,7 +1235,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, file, line, true, text) + logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) return len(b), nil } @@ -1232,13 +1270,14 @@ func (l *loggingT) setV(pc uintptr) Level { type Verbose struct { enabled bool logr logr.Logger + filter LogFilter } func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { - return Verbose{b, nil} + return Verbose{b, nil, logging.filter} } - return Verbose{b, logging.logr.V(int(level))} + return Verbose{b, logging.logr.V(int(level)), logging.filter} } // V reports whether verbosity at the call site is at least the requested level. @@ -1265,7 +1304,7 @@ func V(level Level) Verbose { return newVerbose(level, true) } - // It's off globally but it vmodule may still be set. + // It's off globally but vmodule may still be set. // Here is another cheap but safe test to see if vmodule is enabled. if atomic.LoadInt32(&logging.filterLength) > 0 { // Now we need a proper lock to use the logging structure. The pcs field @@ -1296,7 +1335,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, args...) + logging.print(infoLog, v.logr, v.filter, args...) } } @@ -1304,7 +1343,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. 
func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, args...) + logging.println(infoLog, v.logr, v.filter, args...) } } @@ -1312,7 +1351,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, format, args...) + logging.printf(infoLog, v.logr, v.filter, format, args...) } } @@ -1320,40 +1359,47 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, msg, keysAndValues...) + logging.infoS(v.logr, v.filter, msg, keysAndValues...) } } -// Error is equivalent to the global Error function, guarded by the value of v. -// See the documentation of V for usage. +// Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, msg, args...) + logging.errorS(err, v.logr, v.filter, msg, args...) + } +} + +// ErrorS is equivalent to the global Error function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.errorS(err, v.logr, v.filter, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, args...) + logging.print(infoLog, logging.logr, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, depth, args...) + logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, args...) + logging.println(infoLog, logging.logr, logging.filter, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, format, args...) + logging.printf(infoLog, logging.logr, logging.filter, format, args...) } // InfoS structured logs to the INFO log. @@ -1365,55 +1411,55 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, msg, keysAndValues...) + logging.infoS(logging.logr, logging.filter, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, args...) + logging.print(warningLog, logging.logr, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, depth, args...) 
+ logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, args...) + logging.println(warningLog, logging.logr, logging.filter, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, format, args...) + logging.printf(warningLog, logging.logr, logging.filter, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, args...) + logging.print(errorLog, logging.logr, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, depth, args...) + logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, args...) + logging.println(errorLog, logging.logr, logging.filter, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, format, args...) + logging.printf(errorLog, logging.logr, logging.filter, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1426,34 +1472,34 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, msg, keysAndValues...) + logging.errorS(err, logging.logr, logging.filter, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, args...) + logging.print(fatalLog, logging.logr, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, depth, args...) + logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, args...) + logging.println(fatalLog, logging.logr, logging.filter, args...) 
} // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, format, args...) + logging.printf(fatalLog, logging.logr, logging.filter, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1464,27 +1510,42 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, args...) + logging.print(fatalLog, logging.logr, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, depth, args...) + logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, args...) + logging.println(fatalLog, logging.logr, logging.filter, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, format, args...) + logging.printf(fatalLog, logging.logr, logging.filter, format, args...) +} + +// LogFilter is a collection of functions that can filter all logging calls, +// e.g. for sanitization of arguments and prevent accidental leaking of secrets. 
+type LogFilter interface { + Filter(args []interface{}) []interface{} + FilterF(format string, args []interface{}) (string, []interface{}) + FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) +} + +func SetLogFilter(filter LogFilter) { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.filter = filter } // ObjectRef references a kubernetes object diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 6df0df389..4abcf9b82 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -109,19 +109,39 @@ func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, if _, ok := d.models[reference]; !ok { return nil, newSchemaError(path, "unknown model in reference: %q", reference) } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Ref{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, reference: reference, definitions: d, }, nil } -func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { +func parseDefault(def *openapi_v2.Any) (interface{}, error) { + if def == nil { + return nil, nil + } + var i interface{} + if err := yaml.Unmarshal([]byte(def.Yaml), &i); err != nil { + return nil, err + } + return i, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) (BaseSchema, error) { + def, err := parseDefault(s.GetDefault()) + if err != nil { + return BaseSchema{}, err + } return BaseSchema{ Description: s.GetDescription(), + Default: def, Extensions: VendorExtensionToMap(s.GetVendorExtension()), Path: *path, - } + }, nil } // We believe the schema is a map, verify and return a new schema @@ -132,8 +152,12 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) var sub Schema // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type. 
if s.GetAdditionalProperties().GetSchema() == nil { + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } sub = &Arbitrary{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, } } else { var err error @@ -142,8 +166,12 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) return nil, err } } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Map{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, SubType: sub, }, nil } @@ -165,8 +193,12 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, default: return nil, newSchemaError(path, "Unknown primitive type: %q", t) } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Primitive{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, Type: t, Format: s.GetFormat(), }, nil @@ -188,8 +220,12 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro if err != nil { return nil, err } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Array{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, SubType: sub, }, nil } @@ -216,8 +252,12 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error fieldOrder = append(fieldOrder, name) } + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Kind{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, RequiredFields: s.GetRequired(), Fields: fields, FieldOrder: fieldOrder, @@ -225,8 +265,12 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error } func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + base, err := d.parseBaseSchema(s, path) + if err != nil { + return nil, err + } return &Arbitrary{ - BaseSchema: d.parseBaseSchema(s, path), + BaseSchema: base, }, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 46643aa50..f31a2de2c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -77,6 +77,8 @@ type Schema interface { GetPath() *Path // Describes the field. GetDescription() string + // Default for that schema. + GetDefault() interface{} // Returns type extensions. 
GetExtensions() map[string]interface{} } @@ -129,6 +131,7 @@ func (p *Path) FieldPath(field string) Path { type BaseSchema struct { Description string Extensions map[string]interface{} + Default interface{} Path Path } @@ -141,6 +144,10 @@ func (b *BaseSchema) GetExtensions() map[string]interface{} { return b.Extensions } +func (b *BaseSchema) GetDefault() interface{} { + return b.Default +} + func (b *BaseSchema) GetPath() *Path { return &b.Path } diff --git a/vendor/modules.txt b/vendor/modules.txt index 1a8da3d4b..2998dd764 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,29 +1,39 @@ -# cloud.google.com/go v0.51.0 +# cloud.google.com/go v0.57.0 +## explicit; go 1.11 cloud.google.com/go/compute/metadata -# github.com/Microsoft/go-winio v0.4.11 +# github.com/Microsoft/go-winio v0.4.17 +## explicit; go 1.12 github.com/Microsoft/go-winio -# github.com/Microsoft/hcsshim v0.8.6 -## explicit +github.com/Microsoft/go-winio/pkg/guid +github.com/Microsoft/go-winio/pkg/security +github.com/Microsoft/go-winio/vhd +# github.com/Microsoft/hcsshim v0.9.2 +## explicit; go 1.13 github.com/Microsoft/hcsshim +github.com/Microsoft/hcsshim/computestorage github.com/Microsoft/hcsshim/hcn github.com/Microsoft/hcsshim/internal/cni -github.com/Microsoft/hcsshim/internal/guestrequest -github.com/Microsoft/hcsshim/internal/guid +github.com/Microsoft/hcsshim/internal/cow github.com/Microsoft/hcsshim/internal/hcs +github.com/Microsoft/hcsshim/internal/hcs/schema1 +github.com/Microsoft/hcsshim/internal/hcs/schema2 github.com/Microsoft/hcsshim/internal/hcserror github.com/Microsoft/hcsshim/internal/hns github.com/Microsoft/hcsshim/internal/interop +github.com/Microsoft/hcsshim/internal/log github.com/Microsoft/hcsshim/internal/logfields github.com/Microsoft/hcsshim/internal/longpath github.com/Microsoft/hcsshim/internal/mergemaps +github.com/Microsoft/hcsshim/internal/oc github.com/Microsoft/hcsshim/internal/regstate github.com/Microsoft/hcsshim/internal/runhcs github.com/Microsoft/hcsshim/internal/safefile -github.com/Microsoft/hcsshim/internal/schema1 -github.com/Microsoft/hcsshim/internal/schema2 github.com/Microsoft/hcsshim/internal/timeout +github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer -# github.com/aws/aws-sdk-go v1.12.54 +github.com/Microsoft/hcsshim/internal/winapi +github.com/Microsoft/hcsshim/osversion +# github.com/aws/aws-sdk-go v1.15.11 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -35,12 +45,16 @@ github.com/aws/aws-sdk-go/aws/credentials github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults github.com/aws/aws-sdk-go/aws/ec2metadata github.com/aws/aws-sdk-go/aws/endpoints github.com/aws/aws-sdk-go/aws/request github.com/aws/aws-sdk-go/aws/session github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/ec2query @@ -53,31 +67,30 @@ github.com/aws/aws-sdk-go/service/sts # github.com/bronze1man/goStrongswanVici v0.0.0-20201105010758-936f38b697fd ## explicit github.com/bronze1man/goStrongswanVici -# github.com/containernetworking/plugins v0.8.6 -## explicit +# github.com/containerd/cgroups 
v1.0.1 +## explicit; go 1.13 +github.com/containerd/cgroups/stats/v1 +# github.com/containernetworking/plugins v0.9.1 +## explicit; go 1.14 github.com/containernetworking/plugins/pkg/utils/sysctl -# github.com/coreos/etcd v3.1.11+incompatible -## explicit -github.com/coreos/etcd/client -github.com/coreos/etcd/pkg/fileutil -github.com/coreos/etcd/pkg/pathutil -github.com/coreos/etcd/pkg/tlsutil -github.com/coreos/etcd/pkg/transport -github.com/coreos/etcd/pkg/types -# github.com/coreos/go-iptables v0.4.5 +# github.com/coreos/go-iptables v0.5.0 ## explicit github.com/coreos/go-iptables/iptables -# github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 +# github.com/coreos/go-semver v0.3.0 +## explicit +github.com/coreos/go-semver/semver +# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e ## explicit github.com/coreos/go-systemd/daemon github.com/coreos/go-systemd/journal -# github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf +# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f ## explicit github.com/coreos/pkg/capnslog github.com/coreos/pkg/flagutil # github.com/davecgh/go-spew v1.1.1 +## explicit github.com/davecgh/go-spew/spew -# github.com/denverdino/aliyungo v0.0.0-20170629053852-f6cab0c35083 +# github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba ## explicit github.com/denverdino/aliyungo/common github.com/denverdino/aliyungo/ecs @@ -87,36 +100,54 @@ github.com/denverdino/aliyungo/util ## explicit github.com/go-ini/ini # github.com/go-logr/logr v0.2.0 +## explicit; go 1.14 github.com/go-logr/logr -# github.com/gogo/protobuf v1.3.1 +# github.com/gogo/protobuf v1.3.2 +## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys -# github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 +# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.4.2 +# github.com/golang/protobuf v1.5.2 +## explicit; go 1.9 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/google/btree v1.0.1 +## explicit; go 1.12 # github.com/google/go-cmp v0.5.6 +## explicit; go 1.8 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.1.0 +## explicit; go 1.12 github.com/google/gofuzz # github.com/googleapis/gax-go/v2 v2.0.5 +## explicit github.com/googleapis/gax-go/v2 # github.com/googleapis/gnostic v0.4.1 +## explicit; go 1.12 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/openapiv2 +# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 +## explicit; go 1.14 +# github.com/grpc-ecosystem/grpc-gateway v1.16.0 +## explicit; go 1.14 # github.com/hashicorp/golang-lru v0.5.1 +## explicit github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/imdario/mergo v0.3.5 +# github.com/imdario/mergo v0.3.12 +## explicit; go 1.13 github.com/imdario/mergo # github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 ## explicit @@ -124,55 +155,74 @@ github.com/jmespath/go-jmespath # github.com/joho/godotenv v0.0.0-20161216230537-726cc8b906e3 ## 
explicit github.com/joho/godotenv -# github.com/jonboulle/clockwork v0.1.0 -## explicit +# github.com/jonboulle/clockwork v0.2.2 +## explicit; go 1.13 github.com/jonboulle/clockwork # github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 +## explicit; go 1.13 github.com/josharian/native -# github.com/json-iterator/go v1.1.10 +# github.com/json-iterator/go v1.1.11 +## explicit; go 1.12 github.com/json-iterator/go # github.com/mdlayher/genetlink v1.0.0 +## explicit; go 1.13 github.com/mdlayher/genetlink # github.com/mdlayher/netlink v1.4.1 +## explicit; go 1.12 github.com/mdlayher/netlink github.com/mdlayher/netlink/nlenc # github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 +## explicit; go 1.16 github.com/mdlayher/socket # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd +## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 -github.com/modern-go/reflect2 -# github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e ## explicit +github.com/modern-go/reflect2 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/sirupsen/logrus v1.0.6 +# github.com/prometheus/client_golang v1.11.0 +## explicit; go 1.13 +# github.com/sirupsen/logrus v1.8.1 +## explicit; go 1.13 github.com/sirupsen/logrus # github.com/smartystreets/goconvey v1.6.4 ## explicit +# github.com/soheilhy/cmux v0.1.5 +## explicit; go 1.11 # github.com/spf13/pflag v1.0.5 +## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.6.1 -## explicit # github.com/tencentcloud/tencentcloud-sdk-go v1.0.67 -## explicit +## explicit; go 1.14 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/vpc/v20170312 -# github.com/ugorji/go v0.0.0-20170107133203-ded73eae5db7 -## explicit -github.com/ugorji/go/codec -# github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf +# github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 ## explicit +# github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 +## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl -# github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc -## explicit +# github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae +## explicit; go 1.12 github.com/vishvananda/netns -# go.opencensus.io v0.22.2 +# go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 +## explicit; go 1.14 +go.etcd.io/etcd/client +go.etcd.io/etcd/pkg/fileutil +go.etcd.io/etcd/pkg/pathutil +go.etcd.io/etcd/pkg/srv +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types +go.etcd.io/etcd/version +# go.opencensus.io v0.22.3 +## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal go.opencensus.io/internal/tagencoding @@ -189,12 +239,27 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.uber.org/atomic v1.7.0 +## explicit; go 1.13 +go.uber.org/atomic +# go.uber.org/multierr v1.6.0 +## explicit; go 1.12 +go.uber.org/multierr +# go.uber.org/zap v1.17.0 +## explicit; go 1.13 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit 
+go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 +## explicit; go 1.17 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.0.0-20211020060615-d418f374d309 -## explicit +## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -204,33 +269,37 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 -## explicit +# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d +## explicit; go 1.11 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.0.0-20211020174200-9d6173849985 +# golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e +## explicit; go 1.17 golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry # golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 +## explicit; go 1.11 golang.org/x/term # golang.org/x/text v0.3.6 +## explicit; go 1.11 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20191024005414-555d28b269f0 +# golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba +## explicit golang.org/x/time/rate # golang.zx2c4.com/wireguard v0.0.0-20211022192229-f1f626090e3b -## explicit +## explicit; go 1.17 golang.zx2c4.com/wireguard/ipc/winpipe # golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211019185142-5be1d6054c42 -## explicit +## explicit; go 1.13 golang.zx2c4.com/wireguard/wgctrl golang.zx2c4.com/wireguard/wgctrl/internal/wginternal golang.zx2c4.com/wireguard/wgctrl/internal/wglinux @@ -242,8 +311,8 @@ golang.zx2c4.com/wireguard/wgctrl/internal/wgwindows golang.zx2c4.com/wireguard/wgctrl/internal/wgwindows/internal/ioctl golang.zx2c4.com/wireguard/wgctrl/internal/wgwindows/internal/setupapi golang.zx2c4.com/wireguard/wgctrl/wgtypes -# google.golang.org/api v0.15.0 -## explicit +# google.golang.org/api v0.22.0 +## explicit; go 1.11 google.golang.org/api/compute/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -251,9 +320,12 @@ google.golang.org/api/internal google.golang.org/api/internal/gensupport google.golang.org/api/internal/third_party/uritemplates google.golang.org/api/option +google.golang.org/api/option/internaloption +google.golang.org/api/transport/cert google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.5 +# google.golang.org/appengine v1.6.6 +## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal google.golang.org/appengine/internal/app_identity @@ -264,9 +336,11 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 +# google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c +## explicit; go 1.11 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.27.0 +# google.golang.org/grpc v1.40.0 => google.golang.org/grpc v1.29.0 +## explicit; go 1.11 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -288,10 +362,13 @@ 
google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive @@ -303,7 +380,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.24.0 +# google.golang.org/protobuf v1.27.1 +## explicit; go 1.9 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt @@ -314,36 +392,37 @@ google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text google.golang.org/protobuf/internal/errors -google.golang.org/protobuf/internal/fieldnum -google.golang.org/protobuf/internal/fieldsort google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/filetype google.golang.org/protobuf/internal/flags -google.golang.org/protobuf/internal/genname +google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl -google.golang.org/protobuf/internal/mapsort +google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl +google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f -## explicit # gopkg.in/inf.v0 v0.9.1 +## explicit gopkg.in/inf.v0 -# gopkg.in/yaml.v2 v2.2.8 +# gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15 gopkg.in/yaml.v2 -# k8s.io/api v0.19.5 -## explicit +# k8s.io/api v0.20.6 +## explicit; go 1.15 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apiserverinternal/v1alpha1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 @@ -368,8 +447,10 @@ k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/flowcontrol/v1alpha1 +k8s.io/api/flowcontrol/v1beta1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1 k8s.io/api/node/v1alpha1 k8s.io/api/node/v1beta1 k8s.io/api/policy/v1beta1 @@ -379,12 +460,11 @@ k8s.io/api/rbac/v1beta1 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 -k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.19.5 -## explicit +# k8s.io/apimachinery v0.20.6 +## explicit; go 1.15 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource @@ -427,13 +507,14 @@ k8s.io/apimachinery/pkg/version 
k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.19.5 -## explicit +# k8s.io/client-go v0.20.6 +## explicit; go 1.15 k8s.io/client-go/discovery k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 +k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1beta2 @@ -458,8 +539,10 @@ k8s.io/client-go/kubernetes/typed/events/v1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1beta1 +k8s.io/client-go/kubernetes/typed/node/v1 k8s.io/client-go/kubernetes/typed/node/v1alpha1 k8s.io/client-go/kubernetes/typed/node/v1beta1 k8s.io/client-go/kubernetes/typed/policy/v1beta1 @@ -469,7 +552,6 @@ k8s.io/client-go/kubernetes/typed/rbac/v1beta1 k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 -k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 @@ -498,17 +580,23 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue # k8s.io/klog v1.0.0 -## explicit +## explicit; go 1.12 k8s.io/klog -# k8s.io/klog/v2 v2.2.0 +# k8s.io/klog/v2 v2.4.0 +## explicit; go 1.13 k8s.io/klog/v2 -# k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 +# k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd +## explicit; go 1.12 k8s.io/kube-openapi/pkg/util/proto -# k8s.io/utils v0.0.0-20200729134348-d5654de09c73 +# k8s.io/utils v0.0.0-20201110183641-67b214c5f920 +## explicit; go 1.12 k8s.io/utils/buffer k8s.io/utils/integer k8s.io/utils/trace -# sigs.k8s.io/structured-merge-diff/v4 v4.0.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.0.3 +## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 +## explicit; go 1.12 sigs.k8s.io/yaml +# google.golang.org/grpc => google.golang.org/grpc v1.29.0 diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index 49e6dd169..a5a467c0f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -70,11 +70,11 @@ func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) } -// GetUsing returns the field identified by this FieldCacheEntry from the provided struct. +// GetFrom returns the field identified by this FieldCacheEntry from the provided struct. 
func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { // field might be nested within 'inline' structs for _, elem := range f.fieldPath { - structVal = structVal.FieldByIndex(elem) + structVal = dereference(structVal).FieldByIndex(elem) } return structVal } @@ -150,7 +150,11 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi continue } if isInline { - buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) + e := field.Type + if field.Type.Kind() == reflect.Ptr { + e = field.Type.Elem() + } + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod deleted file mode 100644 index 7224f3497..000000000 --- a/vendor/sigs.k8s.io/yaml/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module sigs.k8s.io/yaml - -go 1.12 - -require ( - github.com/davecgh/go-spew v1.1.1 - gopkg.in/yaml.v2 v2.2.8 -) diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum deleted file mode 100644 index 76e49483a..000000000 --- a/vendor/sigs.k8s.io/yaml/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 715744d2710877bcd4c95c429fd861a1f19d3e6d Mon Sep 17 00:00:00 2001 From: Luther Monson Date: Thu, 27 Jan 2022 09:03:40 -0700 Subject: [PATCH 2/3] updates for package changes --- Documentation/running.md | 5 ----- README.md | 2 +- backend/wireguard/device.go | 1 + bill-of-materials.json | 2 +- subnet/etcdv2/local_manager.go | 2 +- subnet/etcdv2/mock_etcd.go | 2 +- subnet/etcdv2/mock_etcd_test.go | 2 +- subnet/etcdv2/mock_registry.go | 2 +- subnet/etcdv2/registry.go | 10 +++++----- subnet/etcdv2/registry_test.go | 2 +- 10 files changed, 13 insertions(+), 17 deletions(-) diff --git a/Documentation/running.md b/Documentation/running.md index 669a54ccb..26bffc49e 100644 --- a/Documentation/running.md +++ b/Documentation/running.md @@ -128,8 +128,3 @@ As such, `flanneld` can be restarted (even to do an upgrade) without disturbing However in the case of `vxlan` backend, this needs to be done within a few seconds as ARP entries can start to timeout requiring the flannel daemon to refresh them. Also, to avoid interruptions during restart, the configuration must not be changed (e.g. VNI, --iface values). - - -[coreos-etcd]: https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/local_cluster.md -[configuring-flannel]: https://coreos.com/docs/cluster-management/setup/flannel-config/ -[leases]: reservations.md diff --git a/README.md b/README.md index a7194fcb6..956b28cb0 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ Flannel is under the Apache 2.0 license. 
See the [LICENSE][license] file for det [calico]: http://www.projectcalico.org [pod-cidr]: https://kubernetes.io/docs/admin/kubelet/ -[etcd]: https://github.com/coreos/etcd +[etcd]: https://go.etcd.io/etcd/v3 [contributing]: CONTRIBUTING.md [license]: https://github.com/flannel-io/flannel/blob/master/LICENSE [milestones]: https://github.com/flannel-io/flannel/milestones diff --git a/backend/wireguard/device.go b/backend/wireguard/device.go index 32f216145..d38dfdcdb 100644 --- a/backend/wireguard/device.go +++ b/backend/wireguard/device.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Copyright 2021 flannel authors diff --git a/bill-of-materials.json b/bill-of-materials.json index 82d5b860a..94a47d421 100644 --- a/bill-of-materials.json +++ b/bill-of-materials.json @@ -15,7 +15,7 @@ "confidence": 1 }, { - "project": "github.com/coreos/etcd", + "project": "go.etcd.io/etcd/v3", "license": "Apache License 2.0", "confidence": 1 }, diff --git a/subnet/etcdv2/local_manager.go b/subnet/etcdv2/local_manager.go index f02959f2d..e69c65653 100644 --- a/subnet/etcdv2/local_manager.go +++ b/subnet/etcdv2/local_manager.go @@ -20,9 +20,9 @@ import ( "strconv" "time" - etcd "github.com/coreos/etcd/client" "github.com/flannel-io/flannel/pkg/ip" . "github.com/flannel-io/flannel/subnet" + etcd "go.etcd.io/etcd/client" "golang.org/x/net/context" log "k8s.io/klog" ) diff --git a/subnet/etcdv2/mock_etcd.go b/subnet/etcdv2/mock_etcd.go index fa7383edd..f0f17a6af 100644 --- a/subnet/etcdv2/mock_etcd.go +++ b/subnet/etcdv2/mock_etcd.go @@ -20,7 +20,7 @@ import ( "sync" "time" - etcd "github.com/coreos/etcd/client" + etcd "go.etcd.io/etcd/client" "golang.org/x/net/context" ) diff --git a/subnet/etcdv2/mock_etcd_test.go b/subnet/etcdv2/mock_etcd_test.go index 4dd33676c..ed63667c2 100644 --- a/subnet/etcdv2/mock_etcd_test.go +++ b/subnet/etcdv2/mock_etcd_test.go @@ -18,7 +18,7 @@ import ( "fmt" "testing" - etcd "github.com/coreos/etcd/client" + etcd "go.etcd.io/etcd/client" "golang.org/x/net/context" ) diff --git a/subnet/etcdv2/mock_registry.go b/subnet/etcdv2/mock_registry.go index fc3ab7a0c..97ecc053f 100644 --- a/subnet/etcdv2/mock_registry.go +++ b/subnet/etcdv2/mock_registry.go @@ -19,10 +19,10 @@ import ( "sync" "time" - etcd "github.com/coreos/etcd/client" "github.com/flannel-io/flannel/pkg/ip" . "github.com/flannel-io/flannel/subnet" "github.com/jonboulle/clockwork" + etcd "go.etcd.io/etcd/client" "golang.org/x/net/context" ) diff --git a/subnet/etcdv2/registry.go b/subnet/etcdv2/registry.go index d9b50e082..13101adfa 100644 --- a/subnet/etcdv2/registry.go +++ b/subnet/etcdv2/registry.go @@ -23,10 +23,10 @@ import ( "sync" "time" - etcd "github.com/coreos/etcd/client" - "github.com/coreos/etcd/pkg/transport" "github.com/flannel-io/flannel/pkg/ip" . 
"github.com/flannel-io/flannel/subnet" + etcd "go.etcd.io/etcd/client" + "go.etcd.io/etcd/pkg/transport" "golang.org/x/net/context" log "k8s.io/klog" ) @@ -68,9 +68,9 @@ type etcdSubnetRegistry struct { func newEtcdClient(c *EtcdConfig) (etcd.KeysAPI, error) { tlsInfo := transport.TLSInfo{ - CertFile: c.Certfile, - KeyFile: c.Keyfile, - CAFile: c.CAFile, + CertFile: c.Certfile, + KeyFile: c.Keyfile, + TrustedCAFile: c.CAFile, } t, err := transport.NewTransport(tlsInfo, time.Second) diff --git a/subnet/etcdv2/registry_test.go b/subnet/etcdv2/registry_test.go index ed98b3d17..3ce822cc1 100644 --- a/subnet/etcdv2/registry_test.go +++ b/subnet/etcdv2/registry_test.go @@ -20,9 +20,9 @@ import ( "testing" "time" - etcd "github.com/coreos/etcd/client" "github.com/flannel-io/flannel/pkg/ip" . "github.com/flannel-io/flannel/subnet" + etcd "go.etcd.io/etcd/client" "golang.org/x/net/context" ) From 4c23922a2ca168479e5b211f68c21440ce2497a6 Mon Sep 17 00:00:00 2001 From: Luther Monson Date: Thu, 27 Jan 2022 09:09:33 -0700 Subject: [PATCH 3/3] +1 line for new go v1.17 build tag format --- header-check.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/header-check.sh b/header-check.sh index 70b367cb8..ef16e341c 100755 --- a/header-check.sh +++ b/header-check.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash licRes=$(for file in $(find . -type f -iname '*.go' ! -path './vendor/*'); do - head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e " ${file}" + head -n4 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e " ${file}" done;) if [ -n "${licRes}" ]; then echo -e "license header checking failed:\n${licRes}"